Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 10 for xla_run (0.13 sec)

  1. tensorflow/compiler/jit/build_xla_ops_pass.cc

        // "Strict" compilation:  every _XlaCompile invocation must compile the
        // cluster.
        ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args,
                             xla_compile.key, n->output_types());
    
        MoveOutgoingEdges(g, /*old_node=*/n,
                          /*new_node=*/xla_run.operation.node());
        g->RemoveNode(n);
      } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/build_xla_ops_pass_test.cc

      auto xla_run =
          NodeWith(Op("_XlaRun"), Inputs(Out(1, predicated_compilation_key)));
      auto tf_call =
          NodeWith(Op("StatefulPartitionedCall"),
                   CtrlDeps(NodeWith(Op("Identity"),
                                     Inputs(Out(0, predicated_compilation_key)))));
      auto merge = NodeWith(Op("_XlaMerge"), Inputs(Out(tf_call), Out(xla_run)));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 12.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/flags.h

        }
    
        // Allow using Device API (PjRt) for `device_type` in the XlaCompile and
        // XlaRun ops. Please note that `enabled_for_compile_and_run_` needs to be
        // true in addition to the `device_type` being allowed in order to use the
        // Device API for single device compilation and execution in the XlaCompile
        // and XlaRun ops.
        void AllowForDeviceInXlaCompileAndRun(const DeviceType& device_type) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 14.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/kernels/xla_ops.cc

                            XlaCompileOp);
    
    REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE_CPU), XlaRunOp);
    REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE_GPU).HostMemory("key"),
                            XlaRunOp);
    REGISTER_KERNEL_BUILDER(
        Name("_XlaRun").Device(DEVICE_DEFAULT).HostMemory("key"), XlaRunOp);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tfrt/tests/hoist_invariant_ops.mlir

    }
    
    }
    
    // -----
    
    module attributes {tf_saved_model.semantics} {
    
    // Test not hoisting callees in xla launch functions.
    
    // CHECK-LABEL: func private @xla_func
    func.func private @xla_func(%arg0: tensor<1x3xf32>) -> tensor<1x3xf32>
      attributes {tf._input_shapes = [#tf_type.shape<1x3>, #tf_type.shape<*>], tf.signature.is_stateful} {
      // CHECK-NOT: tf._TfrtGetResource
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 01 23:54:14 UTC 2024
    - 18.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td

        These operations do not map 1-1 to TensorFlow ops and requires a lowering
        pass later to transform them into Compile/Run op pairs, like XlaCompile and
        XlaRun.
    }];
    
      let cppNamespace = "::mlir::tf_device";
    }
    
    //===----------------------------------------------------------------------===//
    // TensorFlow Device Dialect Ops definitions
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 23:53:20 UTC 2024
    - 14.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/partially_decluster_pass.cc

        // benefit of this check.
        // TODO(tpopp): Only apply this if the value being consumed is not output
        // from the cluster to another consumer.
        // TODO(tpopp): See if XlaRun can be modified to avoid this issue
        // completely.
        if (IsShapeConsumerOp(*n)) {
          continue;
        }
        // We assume the only XLA-auto-clusterable operations with side effects are
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 11:36:41 UTC 2024
    - 15.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir

      %1 = "tf.AddV2"(%arg0, %arg1) {__op_key = 0: i32} : (tensor<1x3xf32>, tensor<1x3xf32>) -> tensor<1x3xf32>
      func.return %1 : tensor<1x3xf32>
    }
    
    // CHECK-LABEL: func @xla_func
    func.func @xla_func(%arg0: tensor<1x3xf32>) -> tensor<*xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "input:0", outputs = "output:0"}} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 20:44:15 UTC 2024
    - 24.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/jit/xla_launch_util_test.cc

        CompileToExecutable(args, &result, &executable);
      }
    
      // Must-be-constant inputs that appear in the beginning are stripped out at
      // the time of execution i.e. in XlaRun.
      inputs = {inputs.begin() + constant_input_indices.size(), inputs.end()};
      {
        TF_ASSERT_OK_AND_ASSIGN(std::vector<VariableInfo> updated_variables,
                                GatherVariableInfo(context_.get(), *result,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td

    def TF_NcclAllReduceOrderingResource : TF_ResourceBase<"NcclAllReduceOrdering">;
    def TF_GlobalIterIdResource : TF_ResourceBase<"GlobalIterId">;
    def TF__XlaRunResource : TF_ResourceBase<"_XlaRun">;
    // Fake resource, see `TF_MustExecute` below.
    def TF_MustExecuteResource : TF_ResourceBase<"MustExecute">;
    
    // Value-based side effects
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 30.5K bytes
    - Viewed (0)
Back to top