- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 131 for Computation (0.17 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/tpu_resource_partitioning.mlir
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<i32>>>) -> tensor<i32> // CHECK: [[COMPUTATION:%.+]] = "tf_device.cluster_func"([[INPUT]]) %2 = "tf_device.cluster_func"(%1) {func = @computation, use_spmd_for_xla_partitioning = true} : (tensor<i32>) -> tensor<i32> // CHECK: [[OUTPUT:%.+]]:2 = "tf.TPUPartitionedOutputV2"([[COMPUTATION]]) // CHECK-SAME: _XlaSharding = "" // CHECK-SAME: partition_dims = []
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 23 23:53:20 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/compiler/aot/compile.cc
return true; } namespace { // Compiles the XLA computation into executable code. Status CompileXla(xla::CompileOnlyClient* client, const xla::XlaComputation& computation, const xla::cpu::CpuAotCompilationOptions& aot_opts, CompileResult* compile_result) { // Retrieves arg and result layouts from the computation. // TODO(toddw): Should we let the user choose the major/minor ordering?
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 08:28:57 UTC 2024 - 11.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc
xla::Tuple(&builder, tuple_values); TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build()); EXPECT_EQ(computation.proto().computations_size(), 2); TF_ASSERT_OK(CreateMlirModule()); TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple, ImportXlaComputationIntoModule(computation)); EXPECT_TRUE(root_tuple); int num_func_ops = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
replicated TPU computation. The number of times a TPU computation is replicated is defined in the `tf.TPUReplicateMetadata` op (`num_replicas` attribute) and operand and result sizes of `tf.TPUReplicatedInput` and `tf.TPUReplicatedOutput` respectively must match, excluding packed tensors. It is also assumed ops of the same TPU computation do not have ops outside
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/aot/codegen_test_h.golden
namespace foo { namespace bar { // MyClass represents a computation previously specified in a // TensorFlow graph, now compiled into executable code. This extends the generic // XlaCompiledCpuFunction class with statically type-safe arg and result // methods. Usage example: // // MyClass computation; // // ...set args using computation.argN methods // CHECK(computation.Run()); // // ...inspect results using computation.resultN methods
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 16.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc
} if (output_to_input_alias.empty()) return absl::OkStatus(); xla::HloModuleProto* module_proto = compilation_result->computation->mutable_proto(); absl::StatusOr<xla::ProgramShape> program_shape_or_status = compilation_result->computation->GetProgramShape(); TF_RET_CHECK(program_shape_or_status.ok()); xla::ProgramShape& program_shape = program_shape_or_status.value();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td
// TF device dialect passes. def ResourceOpLiftingPass : Pass<"tf-resource-op-lifting", "ModuleOp"> { let summary = "Lifting resource operations out of device computation"; let description = [{ This pass lifts resource variable operations outside of device computation. This is useful because a lot of accelerator devices can not interact with resource variables directly..
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tf_xla_mlir_translate.cc
xla::XlaComputation computation, return_value.valid() ? builder.Build(return_value) : builder.Build()); auto hlo_module = computation.proto(); xla::HloProto hlo_proto; hlo_proto.mutable_hlo_module()->Swap(&hlo_module); compilation_result->computation = std::make_shared<xla::XlaComputation>(); xla::XlaComputation* xla_computation = compilation_result->computation.get();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc
} // Returns a set of ops that are outside compiled and can be extracted to before // the TPU computation. These ops are either connected to the inputs of the TPU // computation or other ops that can be extracted, and have no operands from // other ops in the TPU computation that cannot be extracted. llvm::SmallVector<Operation*, 4> FindOutsideCompiledOpsAtHead(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc
XlaComputation& computation) { xla::DebugOptions debug_options; TF_ASSIGN_OR_RETURN(auto hlo_module_config, xla::HloModule::CreateModuleConfigFromProto( computation.proto(), debug_options)); TF_ASSIGN_OR_RETURN( std::unique_ptr<xla::HloModule> hlo_module, xla::HloModule::CreateFromProto(computation.proto(), hlo_module_config));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 18.9K bytes - Viewed (0)