Results 11 - 20 of 56 for PartitionedCall (0.34 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

    func.func @serving_default(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x2x2x2xf32>) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/prepare_tpu_computation_for_tf_export.mlir

      // CHECK: _xla_original_oc_node_name = [[NODE_NAME1:.*]], _xla_token_input_nodes = ["_xla_token_arg_node"]
      %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @PartitionedCall3} : (tensor<i32>) -> (tensor<i32>)
      // CHECK-NOT: _xla_token_input_nodes
      %1 = "tf.PartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @IdentityFunc} : (tensor<i32>) -> (tensor<i32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 18:46:36 UTC 2024
    - 9.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir

    func.func @serving_default(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x2x2x2xf32>) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/build_xla_ops_pass.cc

    // PartitionedCall are placed on the host, a producer that produces them on the
    // device will incur a D2H copy, even if the PartitionedCall is not executed
    // (i.e. even if we choose to execute the XLA compiled computation via _XlaRun).
    // To prevent this, we add control dependencies to make the int32 input edges
    // into the PartitionedCall dead.  With this change the D2H copy only happens if
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes
    - Viewed (0)
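    The excerpted comment reasons that int32 inputs to a PartitionedCall live on the host, so a device-side producer would pay a device-to-host copy unless the int32 input edges are made dead via control dependencies. For reference only, this is roughly what such an int32 input edge looks like in the TF dialect used by the other results here; `@callee` is a placeholder and the sketch is not taken from the indexed file.

    ```mlir
    // Hypothetical int32 edge feeding a tf.PartitionedCall; @callee is a placeholder.
    %idx = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
    %0 = "tf.PartitionedCall"(%idx) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<i32>) -> tensor<i32>
    ```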
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.cc

      Option<bool> skip_partitioned_calls_{
          *this, "skip-partitioned-calls",
          ::llvm::cl::desc(
              "Skip tf.StatefulPartitionedCall and tf.PartitionedCall")};
    };
    
    void TFToMhloPass::runOnOperation() {
      auto func = getOperation();
      MLIRContext *context = func->getContext();
    
      RewritePatternSet patterns(context);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 7.5K bytes
    - Viewed (0)
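    The option string says the TF-to-MHLO conversion can be told to leave tf.StatefulPartitionedCall and tf.PartitionedCall untouched. A minimal sketch of what that skip would mean, assuming the surrounding ops are still legalized; the op names, shapes, and `@some_func` below are illustrative, not from the indexed file.

    ```mlir
    // Before conversion: a call op next to an ordinary TF op.
    %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @some_func} : (tensor<f32>) -> tensor<f32>
    %1 = "tf.Abs"(%0) : (tensor<f32>) -> tensor<f32>

    // With skip-partitioned-calls set, only the non-call op is legalized;
    // the tf.PartitionedCall is assumed to stay in the TF dialect.
    %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @some_func} : (tensor<f32>) -> tensor<f32>
    %1 = "mhlo.abs"(%0) : (tensor<f32>) -> tensor<f32>
    ```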
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

    // CHECK-NEXT: [[q_bias:%.+]] = "quantfork.qcast"([[bias]]) : (tensor<2xf32>) -> tensor<2x!quant.uniform<i32:f32, 0.044022349891595126>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td

        up creating nested XLA launch ops.
    
        For example, the `tf.PartitionedCall` operation in the following code
    
        ```mlir
        func.func @convert_partitioned_call_with_resources(%arg0: tensor<!tf_type.resource>, %arg1: tensor<i32>) -> tensor<i32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 12.5K bytes
    - Viewed (0)
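    The excerpt introduces an example function that receives a resource and an i32 but cuts off before the tf.PartitionedCall it refers to. A plausible continuation, purely to make the shape of the example concrete; the callee `@callee` and the operand order are guesses, not taken from the indexed file.

    ```mlir
    func.func @convert_partitioned_call_with_resources(%arg0: tensor<!tf_type.resource>, %arg1: tensor<i32>) -> tensor<i32> {
      // Hypothetical body: the resource and the integer are forwarded to a callee.
      %0 = "tf.PartitionedCall"(%arg0, %arg1) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<!tf_type.resource>, tensor<i32>) -> tensor<i32>
      return %0 : tensor<i32>
    }
    ```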
  8. tensorflow/compiler/mlir/tensorflow/tests/remove_unused_arguments.mlir

      return %arg0 : tensor<f32>
    }
    
    // CHECK-LABEL: handles_partitioned_function_calls
    func.func @handles_partitioned_function_calls(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
      // CHECK: PartitionedCall"()
      %1 = "tf.PartitionedCall"(%arg0, %arg1) {f = @f} : (tensor<f32>, tensor<f32>) -> tensor<f32>
      return %1 : tensor<f32>
    }
    
    // -----
    
    func.func private @f(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 06 23:00:44 UTC 2024
    - 7K bytes
    - Viewed (0)
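    The CHECK line above expects the rewritten call to end up with no operands. That only follows if the private callee @f ignores both of its arguments; under that assumption, this is a sketch of the rewrite the test checks for (not taken from the indexed file).

    ```mlir
    // Assuming @f uses neither argument, the pass drops them from the callee
    // and from the call site, leaving a zero-operand tf.PartitionedCall.
    %1 = "tf.PartitionedCall"() {f = @f} : () -> tensor<f32>
    ```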
  9. tensorflow/compiler/jit/xla_cluster_util_test.cc

    }
    
    void CreateSubgraphCallingFunctionWithRefVar(const Scope& s) {
      NameAttrList ref_float_function;
      ref_float_function.set_name("RefFloatFn");
      ops::PartitionedCall call(s.WithOpName("RefFloat"), {absl::Span<Input>{}},
                                {DT_FLOAT}, ref_float_function);
      Output constant =
          ops::Const(s.WithOpName("constant_ref_pco"), Input::Initializer(0.0));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 10.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/tf_to_stablehlo.mlir

    // -----
    
    // This test makes sure functions without tf._noinline=true is inlined.
    
    module {
      func.func @partitioned_call(%arg0: tensor<1x2x2x3xf32>) -> (tensor<1x2x2x3xf32>) {
        %0 = "tf.PartitionedCall"(%arg0) <{
          config = "", config_proto = "", executor_type = "", f = @some_func
        }> {
          _collective_manager_ids = [], device = ""
        } : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 08 20:05:12 UTC 2024
    - 13.6K bytes
    - Viewed (0)
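    The snippet's comment says functions without tf._noinline = true are inlined. A sketch of what that means for the call shown above, assuming a hypothetical @some_func that merely wraps a tf.Identity; the real callee body is not shown in the excerpt.

    ```mlir
    // Hypothetical callee; the actual body of @some_func is not in the excerpt.
    func.func private @some_func(%arg0: tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32> {
      %0 = "tf.Identity"(%arg0) : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
      return %0 : tensor<1x2x2x3xf32>
    }

    // After inlining, @partitioned_call would contain the callee body directly,
    // with no tf.PartitionedCall remaining.
    func.func @partitioned_call(%arg0: tensor<1x2x2x3xf32>) -> (tensor<1x2x2x3xf32>) {
      %0 = "tf.Identity"(%arg0) : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
      return %0 : tensor<1x2x2x3xf32>
    }
    ```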