- Sort Score
- Result 10 results
- Languages All
Results 21 - 30 of 412 for Computation (0.25 sec)
-
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h
namespace v1 { // Compiles the given Tensorflow graph into xla::HLO. The result is in // compilation_result. If the input computation is in MLIR, it will be // converted to a Tensorflow graph. Otherwise, the graph compiler will be run. absl::Status CompileTensorflowGraphToHlo( const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation, const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:08:57 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/aot/codegen_test_h.golden
namespace foo { namespace bar { // MyClass represents a computation previously specified in a // TensorFlow graph, now compiled into executable code. This extends the generic // XlaCompiledCpuFunction class with statically type-safe arg and result // methods. Usage example: // // MyClass computation; // // ...set args using computation.argN methods // CHECK(computation.Run()); // // ...inspect results using computation.resultN methods
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 16.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc
} if (output_to_input_alias.empty()) return absl::OkStatus(); xla::HloModuleProto* module_proto = compilation_result->computation->mutable_proto(); absl::StatusOr<xla::ProgramShape> program_shape_or_status = compilation_result->computation->GetProgramShape(); TF_RET_CHECK(program_shape_or_status.ok()); xla::ProgramShape& program_shape = program_shape_or_status.value();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td
// TF device dialect passes. def ResourceOpLiftingPass : Pass<"tf-resource-op-lifting", "ModuleOp"> { let summary = "Lifting resource operations out of device computation"; let description = [{ This pass lifts resource variable operations outside of device computation. This is useful because a lot of accelerator devices cannot interact with resource variables directly.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc
return llvm::isa<TF::CastOp, TF::IdentityOp>(op); } // Adds outside compilation attributes to unary ops such as Identity/Cast ops // at the head of TPU computation that is used only by other outside compiled // ops. Identity ops and Cast ops are commonly added to the start of TPU // computation. Adding/expanding outside compilation attributes to these ops // will ensure that head outside compiled ops are correctly located and moved to // host.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/functionalize-if.mlir
// RUN: tf-opt %s --run-tf-graph-optimization --graph-passes=FunctionalizeControlFlowForXlaPass | FileCheck %s func.func @main() { tf_executor.graph { %0 = tf_executor.island wraps "tf._TPUReplicate"() {computation = @foo, Tinputs = [], Tbroadcast_inputs = [], NumVariables = 0, Tguaranteed_constants = [], output_types = []} : () -> () loc("_TPUReplicate") tf_executor.fetch } func.return } func.func @foo() { tf_executor.graph {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 28 12:06:33 UTC 2022 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h
"If not a Graph Analysis failure then matches the computation result " "with the regex") { auto graph_analysis_failure = WasGraphAnalysisFailure(arg); if (graph_analysis_failure) { return testing::ExplainMatchResult(testing::IsTrue(), graph_analysis_failure, result_listener); } auto proto = arg.value().computation->proto().DebugString();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 19 22:54:26 UTC 2023 - 3.1K bytes - Viewed (0) -
futures/listenablefuture1/src/com/google/common/util/concurrent/ListenableFuture.java
* The listener will run when the {@code Future}'s computation is {@linkplain Future#isDone() * complete} or, if the computation is already complete, immediately. * * <p>There is no guaranteed ordering of execution of listeners, but any listener added through * this method is guaranteed to be called once the computation is complete. *
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Mon Jun 26 21:13:41 UTC 2023 - 8K bytes - Viewed (0) -
guava/src/com/google/common/util/concurrent/ListenableFuture.java
* The listener will run when the {@code Future}'s computation is {@linkplain Future#isDone() * complete} or, if the computation is already complete, immediately. * * <p>There is no guaranteed ordering of execution of listeners, but any listener added through * this method is guaranteed to be called once the computation is complete. *
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Mon Jun 26 21:13:41 UTC 2023 - 8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tf_xla_mlir_translate.cc
xla::XlaComputation computation, return_value.valid() ? builder.Build(return_value) : builder.Build()); auto hlo_module = computation.proto(); xla::HloProto hlo_proto; hlo_proto.mutable_hlo_module()->Swap(&hlo_module); compilation_result->computation = std::make_shared<xla::XlaComputation>(); xla::XlaComputation* xla_computation = compilation_result->computation.get();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.8K bytes - Viewed (0)