Results 31 - 40 of 484 for _kernel (0.14 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc

          std::optional<absl::string_view> tf_program = std::nullopt,
          double error_tolerance = 0.1) {
        // The expected result is calculated by evaluating with TF kernels. In
        // some cases a TF kernel behaves differently from the lowered graph
        // (e.g. hybrid ops), so we optionally use a different graph to
        // calculate the expected result.
        TF_ASSERT_OK_AND_ASSIGN(
            auto expected,
    - Last Modified: Wed Apr 03 01:03:21 UTC 2024
    - 35.8K bytes
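
    A minimal sketch of the fallback the comment above describes, under
    assumptions: `ExecuteTfProgram` and `ExecuteLoweredGraph` are hypothetical
    stand-ins for the fixture's evaluation helpers, and `mlir_program` /
    `inputs` come from the surrounding test:

        // Use the alternate TF program as the reference when one is given.
        absl::string_view reference = tf_program.value_or(mlir_program);
        TF_ASSERT_OK_AND_ASSIGN(auto expected,
                                ExecuteTfProgram(reference, inputs));
        TF_ASSERT_OK_AND_ASSIGN(auto actual,
                                ExecuteLoweredGraph(mlir_program, inputs));
        // Compare element-wise within the given absolute tolerance.
        tensorflow::test::ExpectClose(expected, actual,
                                      /*atol=*/error_tolerance);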
  2. tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc

        .Attr("T: type")
        .Attr("enabled: bool")
        .Attr("func_name: string")
        .Attr("node_name: string")
        .SetIsStateful();
    
    class DumpTensorOp : public OpKernel {
     public:
      explicit DumpTensorOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
        string log_dir_path;
        string file_name;
        string func_name;
        string node_name;
        OP_REQUIRES_OK(ctx, ctx->GetAttr("log_dir_path", &log_dir_path));
    - Last Modified: Thu Feb 22 03:12:17 UTC 2024
    - 4.8K bytes
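
    The snippet above shows the standard TensorFlow custom-op pattern: a
    REGISTER_OP call declaring attrs, an OpKernel subclass whose constructor
    reads them, and a kernel registration. A minimal, self-contained sketch of
    the same pattern; the `ExampleDump` op and its `file_name` attr are
    hypothetical:

        #include "tensorflow/core/framework/op.h"
        #include "tensorflow/core/framework/op_kernel.h"

        using namespace tensorflow;

        REGISTER_OP("ExampleDump")
            .Input("tensor_data: T")
            .Attr("T: type")
            .Attr("file_name: string")
            .SetIsStateful();

        class ExampleDumpOp : public OpKernel {
         public:
          explicit ExampleDumpOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
            // Attrs are fixed at graph-construction time, so they are read
            // once here rather than in Compute().
            OP_REQUIRES_OK(ctx, ctx->GetAttr("file_name", &file_name_));
          }

          void Compute(OpKernelContext* ctx) override {
            const Tensor& input = ctx->input(0);
            // A real kernel would serialize `input` to file_name_ here.
            (void)input;
          }

         private:
          std::string file_name_;
        };

        REGISTER_KERNEL_BUILDER(Name("ExampleDump").Device(DEVICE_CPU),
                                ExampleDumpOp);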
  3. tensorflow/compiler/mlir/lite/transforms/legalize_hashtables.cc

      }
    
      for (auto hashtable : hashtables) {
        auto key_dtype = hashtable.getKeyDtype();
        auto value_dtype = hashtable.getValueDtype();
    
        // Only allow string -> int64 and int64 -> string mappings, since the
        // kernels support only those types.
        if (!((mlir::isa<TF::StringType>(key_dtype) &&
               mlir::isa<IntegerType>(value_dtype) &&
               mlir::cast<IntegerType>(value_dtype).getWidth() == 64) ||
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.6K bytes
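
    The dtype check is truncated above; spelled out in full for both
    directions (a sketch that reuses the snippet's `key_dtype` and
    `value_dtype`, with a small helper for the 64-bit integer test):

        auto is_int64 = [](mlir::Type type) {
          auto int_type = mlir::dyn_cast<mlir::IntegerType>(type);
          return int_type && int_type.getWidth() == 64;
        };
        // string -> int64 or int64 -> string; anything else is left
        // unlegalized because the kernels support only these two mappings.
        bool supported =
            (mlir::isa<TF::StringType>(key_dtype) && is_int64(value_dtype)) ||
            (is_int64(key_dtype) && mlir::isa<TF::StringType>(value_dtype));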
  4. tensorflow/compiler/jit/xla_cpu_device.cc

        return status;
      }
      devices->push_back(std::move(device));
      return absl::OkStatus();
    }
    
    REGISTER_LOCAL_DEVICE_FACTORY(DEVICE_XLA_CPU, XlaCpuDeviceFactory);
    
    // Kernel registrations
    
    constexpr std::array<DataType, 18> kAllXlaCpuTypes = {
        {DT_UINT8, DT_QUINT8, DT_UINT16, DT_INT8, DT_QINT8, DT_INT16, DT_INT32,
         DT_QINT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/custom_aggregator_op.cc

          c->set_output(3, c->MakeShape({num_bins_attr->i()}));
    
          return absl::OkStatus();
        });
    
    class CustomAggregatorOp : public OpKernel {
     public:
      explicit CustomAggregatorOp(OpKernelConstruction* context)
          : OpKernel(context) {
        OP_REQUIRES_OK(context, context->GetAttr("id", &id_));
    
        int calibration_method_value;
        int num_bins;
        float min_percentile;
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.2K bytes
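
    The snippet above ends inside a SetShapeFn callback that sizes an output
    from the `num_bins` attr. A minimal sketch of that shape-inference
    pattern; the `ExampleAggregator` op is hypothetical:

        #include "tensorflow/core/framework/op.h"
        #include "tensorflow/core/framework/shape_inference.h"

        using tensorflow::shape_inference::InferenceContext;

        REGISTER_OP("ExampleAggregator")
            .Input("input: float")
            .Output("histogram: int64")
            .Attr("num_bins: int")
            .SetShapeFn([](InferenceContext* c) {
              int32_t num_bins;
              TF_RETURN_IF_ERROR(c->GetAttr("num_bins", &num_bins));
              // The histogram output is a vector whose length is known from
              // the attr, even though the input values are not.
              c->set_output(0, c->MakeShape({num_bins}));
              return absl::OkStatus();
            });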
  6. tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.td

         TFRT_CostFunctionInterface, TFRT_FixedCost<1>]> {
      let summary = "Copy the CPU fallback tensor if it is small";
    
      let description = [{
        This kernel performs a deep copy of the input tensor if it is small, to
        avoid atomic contention on its refcount.

        Note that this kernel always creates a new AsyncValue for each result to
        avoid atomic contention on the AsyncValue's refcount.
      }];
    
      let arguments = (ins
    - Last Modified: Thu Apr 18 15:01:21 UTC 2024
    - 15.8K bytes
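
    The trade-off the description names is generic: handing off a shared
    buffer bumps an atomic refcount, so for small payloads a deep copy can be
    cheaper than the contended atomic. A conceptual sketch, not TFRT's
    implementation (the 128-byte threshold is an arbitrary assumption):

        #include <cstddef>
        #include <memory>
        #include <vector>

        constexpr std::size_t kSmallBytes = 128;  // assumed threshold

        using Buffer = std::vector<char>;

        std::shared_ptr<Buffer> PassAlong(const std::shared_ptr<Buffer>& t) {
          if (t->size() <= kSmallBytes) {
            // Small: deep copy, so downstream users touch a private buffer
            // instead of contending on the original's refcount.
            return std::make_shared<Buffer>(*t);
          }
          // Large: share; copying would cost more than the atomic increment.
          return t;
        }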
  7. tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h

      mlir::LogicalResult PrepareParams();
    
      // Given the required_consts, it will fill the three output vectors with
      // their respective data.
      // Expressions: Output XLA expressions as required by the compiled kernel.
      // Tensors: Vector of tensors that back the TensorValue inputs.
      // Inputs: Vector of inputs that are backed by tensors.
      mlir::LogicalResult PrepareKernelInputs(
          const llvm::SmallDenseSet<int>& required_consts,
    - Last Modified: Thu May 02 09:16:07 UTC 2024
    - 5K bytes
  8. tensorflow/compiler/jit/compilability_check_util.h

    // e.g. `Add`, that expects its inputs in device memory. Here is how it
    // works now.
    // First, what do we mean by "op expects an input in XYZ memory"?
    // There are two types of "ops" here: the tf2xla kernel and the HLO
    // computation it builds. The tf2xla kernel needs to retrieve the actual
    // numeric value of the compile-time constant tensors, so it really expects
    // them to be in host memory. However, for other inputs, it refers to them
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 14.9K bytes
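
    The host-memory expectation exists because the tf2xla kernel reads the
    constant's numeric value while compiling, not while running. A minimal
    sketch, assuming the XlaOpKernel interface; the op itself is hypothetical:

        #include "tensorflow/compiler/tf2xla/xla_op_kernel.h"

        // Hypothetical tf2xla kernel whose second input (e.g. a shape
        // operand) must be a compile-time constant.
        class ExampleReshapeOp : public tensorflow::XlaOpKernel {
         public:
          using XlaOpKernel::XlaOpKernel;

          void Compile(tensorflow::XlaOpKernelContext* ctx) override {
            xla::Literal shape_literal;
            // The value is materialized during compilation, which is only
            // possible if the tensor's bytes are readable on the host.
            OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &shape_literal));
            // The literal would drive HLO construction from here on.
          }
        };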
  9. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc

    calibration method and save the result to the given file path as a binary
    proto file.)doc");
    
    class CalibrationStatisticsSaverOp : public OpKernel {
     public:
      explicit CalibrationStatisticsSaverOp(
          absl::Nonnull<OpKernelConstruction*> context)
          : OpKernel(context) {
        std::string output_file_path;
        OP_REQUIRES_OK(context,
                       context->GetAttr("output_file_path", &output_file_path));
    - Last Modified: Mon May 13 01:31:23 UTC 2024
    - 8K bytes
  10. tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.td

    }
    
    def BatchFunctionOp : TensorflowMlrt_Op<"batch_function", [Pure]> {
      let summary = "Fallback ExecuteOp specialized for tf.BatchFunction.";
    
      let description = [{
        This kernel executes a variant of the tf.BatchFunction kernel that
        supports having the `f` attribute as a bytecode function.
    
        Example:
          %res = tf_mlrt.batch_function(%input, %captured_input)  {
              device = "/device:CPU:0",
    - Last Modified: Fri May 31 20:44:15 UTC 2024
    - 13.6K bytes