- Sort Score
- Result 10 results
- Languages All
Results 91 - 100 of 333 for computations (0.16 sec)
-
tensorflow/compiler/jit/device_compilation_cache.h
executable.get()); int64_t hlo_module_size = 0; if (compilation_result != nullptr && compilation_result->computation != nullptr) { hlo_module_size = compilation_result->computation->proto().ByteSizeLong(); } return absl::StrCat( "{compile_state: ", compile_state, ", request_count: ", request_count,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 12 08:49:52 UTC 2023 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h
CreateTPUClusterFormationPass(bool strict_clusters = false); // Creates a pass that extracts outside compilation (Host ops inside device // cluster) at head/tail of Device cluster to run before/after XLA computation. std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateExtractHeadTailOutsideCompilationPass(); // Creates a pass that extracts outside compilation (Host ops inside device
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc
std::vector<std::vector<xla::Shape>> per_core_arg_shapes; tpu::TPUCompileMetadataProto metadata_proto; std::vector<TensorShape> arg_shapes; if (computation.index() == 0) { TF_RETURN_IF_ERROR(tensorflow::tf2xla::internal::ConfigureMetadata( std::get<0>(computation).mlir_module, arg_shapes, metadata_proto)); } XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:08:57 UTC 2024 - 11.7K bytes - Viewed (0) -
src/internal/types/testdata/check/decls2/decls2a.go
// it's double-declared (it would cost extra in the common case to verify // this). But the MethodSet computation will not find it due to the name // collision caused by the double-declaration, leading to an internal // inconsistency while we are verifying one computation against the other. // var _ = T1c{}.Pointer // T2's method declared before the type. func (*T2) f /* ERROR "field and method" */ () {}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 19:19:55 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/aot/codegen.cc
{{DECLS_FROM_OBJ_FILE}} {{NS_START}} // {{CLASS}} represents a computation previously specified in a // TensorFlow graph, now compiled into executable code. This extends the generic // XlaCompiledCpuFunction class with statically type-safe arg and result // methods. Usage example: // // {{CLASS}} computation; // // ...set args using computation.argN methods // CHECK(computation.Run());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 36.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/move_tpu_compile_to_front.cc
op = op->getParentOp(); } }); } int OutsideCompilationOrdering(Operation* predecessor, Operation* op) { // Actual compilations go first. if (op->hasAttr("_is_compilation")) return 2; // Followed by nested ops that contain compilations. if (op->hasAttr("_wraps_compilation")) return 1; // Followed by everything else. return 0; } void MoveTpuCompileToFrontPass::runOnOperation() {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 00:26:18 UTC 2023 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h
// use_tuple_args: when this is true, always create a tuple argument for the // entry computation. // enable_op_fallback: when this is true, prefer tf2xla fallback kernels over // MLIR // native kernels for legalization to HLO. // return_tuple: when this is true, always create a tuple result for the // entry computation. // shape_determination_fns: Contains layout preference fn and shape
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_patterns.td
// supports float types. tf.round with integer input type will become an // identity op, so we will never face an mhlo.floor with an integer input type. // The pattern matched executes the following computation: // frac = x - floor(x) // to_even = (floor(x) - 2 * floor(0.5 * x)) == 1 // if frac > 0.5 || (frac == 0.5 && to_even) // return floor(x) + 1 // else // return floor(x) def : Pat<(MHLO_SelectOp
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Feb 03 08:58:22 UTC 2024 - 34K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
} } } // Since we have the outputs from host and device computation after moving // outside compiled ops, we can create the actual parallel_execute regions. // Still, one region is for the host computation for outside compilation and // the other one is for the original Device cluster computation. mlir::tf_device::ParallelExecuteOp CreateFinalParallelExecuteOp(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.h
// For case 3, we need to create a PjRtBuffer from the raw device mem pointer, // and we need to ensure the PjRtBuffer persists till XLA computation is // complete. Therefore we put the newly created PjRtBuffer into `owned_args`. // Caller is responsible to ensure `owned_args` lives till the end of XLA // computation. Status PreparePjRtExecutableArguments( int num_missing_prefix_ctx_inputs, const std::vector<int>& input_mapping,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes - Viewed (0)