- Sort by: Score
- Results per page: 10
- Languages: All
Results 61 - 70 of 91 for PartitionedCall (0.24 sec)
-
tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc
// Guarantee all functions have one use, which enables more exact shape // inference. pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass()); pm.addPass(mlir::TF::CreateTFShapeInferencePass()); // Encapsulate PartitionedCall ops within a cluster so that the composite // resource ops can be decomposed. pm.addPass(tensorflow::tf2xla::internal::CreateXlaClusterFormationPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 16:09:14 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
using jit::DeviceId; using jit::DeviceSet; // The clusters we create here are eventually lowered into an // _XlaCompile/_XlaRun pair with a TF executor "fallback" that uses the // PartitionedCall op to execute the cluster in the regular graph executor if // need be. PartitionedCall, however, reruns the entire TF graph optimization // pipeline over the cluster which includes this mark for compilation pass. To
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir
// CHECK-SAME: -> tensor<20xi32> func.func @stateful_partitioned_call(%arg0: tensor<20xi32>, %arg1: tensor<?xi32>) -> tensor<*xi32> { // CHECK: tf.PartitionedCall // CHECK-SAME: (tensor<20xi32>) -> tensor<20xi32> %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @partitioned_called_func} : (tensor<20xi32>) -> tensor<*xi32> // CHECK: tf.StatefulPartitionedCall
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 23 17:24:10 UTC 2024 - 167.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
for (int i = 0; i < num_results; ++i) { main_func.setResultAttr( i, kTfSavedModelIndexPathAttr, ArrayAttr::get(context, {StringAttr::get(context, output_names[i])})); } // Creates PartitionedCall ops to call exported functions. auto guard = OpBuilder::InsertionGuard(builder); int arg_idx = 0; int result_idx = 0; llvm::SmallVector<Value> call_op_returns;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
} def XlaClusterFormationPass : Pass<"tf-xla-cluster-formation", "ModuleOp"> { let summary = "Encapsulate partitioned calls within a Cluster op"; let description = [{ This pass clusters `tf.PartitionedCall` and `tf.StatefulPartitionedCall` with `_xla_compile_device_type` attribute into a `tf_device.cluster`. Notice this pass will only rewrite the outermost call if there are nested
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/resource_op_lifting.mlir
// CHECK: %[[PC0:.*]] = "tf.PartitionedCall"(%[[CONST]], %[[READ]], %[[CONST]]) // CHECK-SAME: f = @callee_resource_lifted %3 = "tf.PartitionedCall"(%1, %0, %1) {f = @callee, config = "", config_proto = "", executor_type = ""} : (tensor<f32>, tensor<*x!tf_type.resource<tensor<f32>>>, tensor<f32>) -> tensor<f32> // CHECK: %[[PC1:.*]] = "tf.PartitionedCall"(%[[CONST]], %[[READ]], %[[CONST]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 74K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir
// CHECK-NEXT: [[results_1:%.*]]:2 = tfrt.call @callee([[chain]] // CHECK-SAME: (!tfrt.chain, !tfrt_fallback.tf_tensor) -> (!tfrt.chain, !tfrt_fallback.tf_tensor) %2 = "tf.PartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<i32>) -> (tensor<i32>) // CHECK-NEXT: [[results_2:%.*]]:2 = tfrt.call @callee([[chain]]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 00:40:32 UTC 2024 - 17.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 22.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc
// and non-xla as well. But below function specifically checks for the op to be // only XLA op. bool IsMustBeXlaOp(Operation* op, MetadataMap metadata_map) { // All PartitionedCall are inlined-out before XLA. // So MustBeXLA should return false if (IsPartitionedOp(op)) return false; if (!op->hasAttr(TF::kTpuReplicateAttr)) return false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 06:51:01 UTC 2024 - 21.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
for (auto current_type : result_types) { if (mlir::dyn_cast<TensorType>(current_type).getElementType().isF32()) return true; } return false; } // Unwraps quantization parameters of PartitionedCall ops with quantized // input/outputs that are created from QuantizePass. class QuantizeFunctionPattern : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0)