- Sort Score
- Result 10 results
- Languages All
Results 61 - 70 of 111 for PartitionedCall (0.21 sec)
-
tensorflow/cc/gradients/functional_grad.cc
auto grad = SymbolicGradient(scope, func_inputs, input_dtypes, f); for (int i = 0; i < num_inputs; i++) { grad_outputs->push_back(grad[i]); } return scope.status(); } REGISTER_GRADIENT_OP("PartitionedCall", PartitionedCallGrad); REGISTER_GRADIENT_OP("StatefulPartitionedCall", PartitionedCallGrad); } // anonymous namespace } // namespace ops
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 15 20:09:06 UTC 2021 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/remove_unused_arguments.mlir
return %arg0 : tensor<f32> } // CHECK-LABEL: handles_partitioned_function_calls func.func @handles_partitioned_function_calls(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> { // CHECK: PartitionedCall"() %1 = "tf.PartitionedCall"(%arg0, %arg1) {f = @f} : (tensor<f32>, tensor<f32>) -> tensor<f32> return %1 : tensor<f32> } // ----- func.func private @f(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 06 23:00:44 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/graph-as-function.mlir
%2:2 = tf_executor.island wraps "tf.StatefulPartitionedCall"(%0#0, %arg1) {Tin = ["tfdtype$DT_FLOAT", "tfdtype$DT_RESOURCE"], Tout = ["tfdtype$DT_FLOAT"], _gradient_op_type = "PartitionedCall-1205", config = "", config_proto = "\0A\07\0A\03GPU\10\00\0A\07\0A\03CPU\10\012\02J\008\01", device = "", executor_type = "", f = @function0} : (tensor<f32>, tensor<*x!tf_type.resource<tensor<3x3x1x32xf32>>>) -> tensor<f32> loc("statefulpartitionedcall")...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 25 12:28:56 UTC 2022 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir
: (tensor<!tf_type.variant<tensor<f32>>>, tensor<i1>) -> tensor<!tf_type.variant<tensor<f32>>> // CHECK: %[[CALL2:.*]]:2 = "tf.PartitionedCall"(%[[INIT]], // CHECK-SAME: f = @callee_tensorlist_decomposed %call2 = "tf.PartitionedCall"(%tl, %arg0) {f = @callee, config = "", config_proto = "", executor_type = ""} : (tensor<!tf_type.variant<tensor<f32>>>, tensor<i1>) -> tensor<!tf_type.variant<tensor<f32>>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 38.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cluster_util_test.cc
} void CreateSubgraphCallingFunctionWithRefVar(const Scope& s) { NameAttrList ref_float_function; ref_float_function.set_name("RefFloatFn"); ops::PartitionedCall call(s.WithOpName("RefFloat"), {absl::Span<Input>{}}, {DT_FLOAT}, ref_float_function); Output constant = ops::Const(s.WithOpName("constant_ref_pco"), Input::Initializer(0.0));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 10.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/components/tf_to_stablehlo.mlir
// ----- // This test makes sure functions without tf._noinline=true are inlined. module { func.func @partitioned_call(%arg0: tensor<1x2x2x3xf32>) -> (tensor<1x2x2x3xf32>) { %0 = "tf.PartitionedCall"(%arg0) <{ config = "", config_proto = "", executor_type = "", f = @some_func }> { _collective_manager_ids = [], device = "" } : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 20:05:12 UTC 2024 - 13.6K bytes - Viewed (0) -
tensorflow/compiler/jit/force_xla_constants_on_host_pass_test.cc
Output in = ops::Placeholder(root, DT_FLOAT); Output perm = ops::Const(root, {3, 1, 2, 0}); NameAttrList b_name_attr; b_name_attr.set_name("TransposeCall"); ops::PartitionedCall call(root.WithOpName("call"), {in, perm}, {DT_FLOAT}, b_name_attr); call.output.front().node()->AddAttr(kXlaMustCompileAttr, true); std::unique_ptr<Graph> graph;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/force_shared_name_for_resource_ops.pbtxt
} attr { key: "shape" value { shape { } } } attr { key: "shared_name" value { s: "" } } } node { name: "Call" op: "PartitionedCall" attr { key: "Tin" value { list { } } } attr { key: "Tout" value { list { type: DT_RESOURCE } } } attr {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 31 02:37:48 UTC 2021 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.cc
auto value = device_attr.getValue(); // TODO(b/229028654): Remove string conversion once we have C++17. absl::string_view device_type(value.data(), value.size()); // Device type may be empty for some ops, e.g. tf.PartitionedCall. auto it = std::find(kValidDeviceTypes.begin(), kValidDeviceTypes.end(), device_type); if (it == kValidDeviceTypes.end()) return failure(); return success(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/graph-as-function.pbtxt
} } } attr { key: "Tout" value { list { type: DT_FLOAT } } } attr { key: "_gradient_op_type" value { s: "PartitionedCall-1205" } } attr { key: "config" value { s: "" } } attr { key: "config_proto" value {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 24 00:18:34 UTC 2023 - 5K bytes - Viewed (0)