- Sort by: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 56 for PartitionedCall (0.17 sec)
-
tensorflow/compiler/jit/mark_for_compilation_pass.cc
using jit::DeviceId; using jit::DeviceSet; // The clusters we create here are eventually lowered into an // _XlaCompile/_XlaRun pair with a TF executor "fallback" that uses the // PartitionedCall op to execute the cluster in the regular graph executor if // need be. PartitionedCall, however, reruns the entire TF graph optimization // pipeline over the cluster which includes this mark for compilation pass. To
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir
// CHECK-SAME: -> tensor<20xi32> func.func @stateful_partitioned_call(%arg0: tensor<20xi32>, %arg1: tensor<?xi32>) -> tensor<*xi32> { // CHECK: tf.PartitionedCall // CHECK-SAME: (tensor<20xi32>) -> tensor<20xi32> %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @partitioned_called_func} : (tensor<20xi32>) -> tensor<*xi32> // CHECK: tf.StatefulPartitionedCall
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 23 17:24:10 UTC 2024 - 167.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
for (int i = 0; i < num_results; ++i) { main_func.setResultAttr( i, kTfSavedModelIndexPathAttr, ArrayAttr::get(context, {StringAttr::get(context, output_names[i])})); } // Creates PartitionedCall ops to call exported functions. auto guard = OpBuilder::InsertionGuard(builder); int arg_idx = 0; int result_idx = 0; llvm::SmallVector<Value> call_op_returns;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
} def XlaClusterFormationPass : Pass<"tf-xla-cluster-formation", "ModuleOp"> { let summary = "Encapsulate partitioned calls within a Cluster op"; let description = [{ This pass clusters `tf.PartitionedCall` and `tf.StatefulPartitionedCall` with `_xla_compile_device_type` attribute into a `tf_device.cluster`. Notice this pass will only rewrite the outermost call if there are nested
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir
// CHECK-NEXT: [[results_1:%.*]]:2 = tfrt.call @callee([[chain]] // CHECK-SAME: (!tfrt.chain, !tfrt_fallback.tf_tensor) -> (!tfrt.chain, !tfrt_fallback.tf_tensor) %2 = "tf.PartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<i32>) -> (tensor<i32>) // CHECK-NEXT: [[results_2:%.*]]:2 = tfrt.call @callee([[chain]]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 00:40:32 UTC 2024 - 17.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 22.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc
// and non-xla as well. But below function specifically checks for the op to be // only XLA op. bool IsMustBeXlaOp(Operation* op, MetadataMap metadata_map) { // All PartitionedCall are inlined-out before XLA. // So MustBeXLA should return false if (IsPartitionedOp(op)) return false; if (!op->hasAttr(TF::kTpuReplicateAttr)) return false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 06:51:01 UTC 2024 - 21.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
for (auto current_type : result_types) { if (mlir::dyn_cast<TensorType>(current_type).getElementType().isF32()) return true; } return false; } // Unwraps quantization parameters of PartitionedCall ops with quantized // input/outputs that are created from QuantizePass. class QuantizeFunctionPattern : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir
%1:2 = "tf.StatefulPartitionedCall"(%arg0){f= @func_body, config="", config_proto="", executor_type=""} : (tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>) %2 = "tf.PartitionedCall"(%arg1) {config = "", config_proto = "", executor_type = "", f = @pcall_func_body} : (tensor<*xi1>) -> (tensor<i32>) %3, %4 = "tf.A"(%1#0, %2) : (tensor<*xi32>, tensor<i32>) -> (tensor<*xi32>, tensor<*xi1>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 20 19:07:52 UTC 2024 - 47.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 26.2K bytes - Viewed (0)