Results 41 - 50 of 650 for _kernel (0.11 sec)

  1. tensorflow/c/c_test.c

      TF_DeleteStatus(s);
      return NULL;
    }
    
    // A compute function. This will never actually get called in this test, it's
    // just nice to know that it compiles.
    void compute(void* kernel, TF_OpKernelContext* ctx) {
      TF_Tensor* input;
      TF_Status* s = TF_NewStatus();
      TF_GetInput(ctx, 0, &input, s);
      TF_DeleteTensor(input);
      TF_DeleteStatus(s);
    }
    
    // Exercises tensorflow's C API.
    - Last Modified: Wed Apr 24 20:50:35 UTC 2024
    - 2.8K bytes
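
    A compute callback like the one excerpted above only does something once it is bound to an op through the TF C kernels API. As a minimal sketch (assuming tensorflow/c/kernels.h; the op name "ExampleOp" and the trivial create/delete callbacks are illustrative and not part of c_test.c), the usual registration pattern looks like this:

      #include "tensorflow/c/kernels.h"
      #include "tensorflow/c/tf_status.h"
      #include "tensorflow/c/tf_tensor.h"

      // Minimal lifecycle callbacks: no per-kernel state is allocated.
      static void* ExampleCreate(TF_OpKernelConstruction* ctx) { return nullptr; }
      static void ExampleDelete(void* kernel) {}

      // Compute callback with the same shape as compute() in c_test.c above.
      static void ExampleCompute(void* kernel, TF_OpKernelContext* ctx) {
        TF_Status* s = TF_NewStatus();
        TF_Tensor* input = nullptr;
        TF_GetInput(ctx, 0, &input, s);
        TF_DeleteTensor(input);
        TF_DeleteStatus(s);
      }

      // Bind the callbacks to op "ExampleOp" on CPU; the builder is consumed
      // by TF_RegisterKernelBuilder.
      void RegisterExampleKernel() {
        TF_Status* s = TF_NewStatus();
        TF_KernelBuilder* builder = TF_NewKernelBuilder(
            "ExampleOp", "CPU", &ExampleCreate, &ExampleCompute, &ExampleDelete);
        TF_RegisterKernelBuilder("ExampleOp", builder, s);
        TF_DeleteStatus(s);
      }
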
  2. tensorflow/compiler/jit/xla_device.cc

        });
      }
    }
    
    void XlaDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
      VLOG(2) << "XlaDevice::Compute " << op_kernel->name() << ":"
              << op_kernel->type_string();
      ShowXlaDeviceDeprecationWarning(jit_device_name_.type_string());
      op_kernel->Compute(context);
    }
    
    void XlaDevice::ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc

          std::optional<absl::string_view> tf_program = std::nullopt,
          double error_tolerance = 0.1) {
        // Expected result is calculated by evaluating using TF kernels. In some
        // cases, TF kernel behaves differently from lowered graph (e.g. Hybrid
        // ops). So we optionally use a different graph to calculate the expected
        // result.
        TF_ASSERT_OK_AND_ASSIGN(
            auto expected,
    - Last Modified: Wed Apr 03 01:03:21 UTC 2024
    - 35.8K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc

        .Attr("T: type")
        .Attr("enabled: bool")
        .Attr("func_name: string")
        .Attr("node_name: string")
        .SetIsStateful();
    
    class DumpTensorOp : public OpKernel {
     public:
      explicit DumpTensorOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
        string log_dir_path;
        string file_name;
        string func_name;
        string node_name;
        OP_REQUIRES_OK(ctx, ctx->GetAttr("log_dir_path", &log_dir_path));
    - Last Modified: Thu Feb 22 03:12:17 UTC 2024
    - 4.8K bytes
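
    The snippet above shows the usual OpKernel pattern of reading string attributes during construction. A minimal, hypothetical kernel in the same style (op name, attribute set, and pass-through Compute are illustrative; this is not the actual DumpTensorOp implementation) could look like this:

      #include "tensorflow/core/framework/op.h"
      #include "tensorflow/core/framework/op_kernel.h"

      namespace tensorflow {

      // Op definition carrying the attributes the kernel reads below.
      REGISTER_OP("ExampleDump")
          .Input("input: T")
          .Output("output: T")
          .Attr("T: type")
          .Attr("log_dir_path: string")
          .Attr("node_name: string");

      class ExampleDumpOp : public OpKernel {
       public:
        explicit ExampleDumpOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
          // OP_REQUIRES_OK fails kernel construction if an attribute is missing.
          OP_REQUIRES_OK(ctx, ctx->GetAttr("log_dir_path", &log_dir_path_));
          OP_REQUIRES_OK(ctx, ctx->GetAttr("node_name", &node_name_));
        }

        void Compute(OpKernelContext* ctx) override {
          // Pass the input through unchanged; a real dump kernel would also
          // serialize it under log_dir_path_.
          ctx->set_output(0, ctx->input(0));
        }

       private:
        string log_dir_path_;
        string node_name_;
      };

      // Bind the kernel to the op for CPU execution.
      REGISTER_KERNEL_BUILDER(Name("ExampleDump").Device(DEVICE_CPU), ExampleDumpOp);

      }  // namespace tensorflow
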
  5. tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py

      def testWithKnownKernel(self):
    
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
          return nn_ops.elu(dot)  # with known kernel, should not expand.
    
        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.9K bytes
  6. tensorflow/compiler/mlir/lite/transforms/legalize_hashtables.cc

      }
    
      for (auto hashtable : hashtables) {
        auto key_dtype = hashtable.getKeyDtype();
        auto value_dtype = hashtable.getValueDtype();
    
        // Only allow string -> int64 and int64 -> string mappings due to kernel
        // capability.
        if (!((mlir::isa<TF::StringType>(key_dtype) &&
               mlir::isa<IntegerType>(value_dtype) &&
               mlir::cast<IntegerType>(value_dtype).getWidth() == 64) ||
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.6K bytes
  7. tensorflow/compiler/jit/xla_cpu_device.cc

        return status;
      }
      devices->push_back(std::move(device));
      return absl::OkStatus();
    }
    
    REGISTER_LOCAL_DEVICE_FACTORY(DEVICE_XLA_CPU, XlaCpuDeviceFactory);
    
    // Kernel registrations
    
    constexpr std::array<DataType, 18> kAllXlaCpuTypes = {
        {DT_UINT8, DT_QUINT8, DT_UINT16, DT_INT8, DT_QINT8, DT_INT16, DT_INT32,
         DT_QINT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
  8. tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_ops.td

      }];
    
      let arguments = (ins TFRT_ChainType);
      let results = (outs TFRT_ChainType);
    }
    
    def DelegateKernelOp : RuntimeFallbackDialect_Op<"delegate_kernel"> {
      let summary = "delegate kernel operation";
      let description = [{
        The "tfd.delegate_kernel" operation takes an input chain, and arbitrary
        number of input arguments, and runs a specified TF op via TFE C API. It
    - Last Modified: Thu Feb 23 19:35:12 UTC 2023
    - 5.9K bytes
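
    The description above says the "tfd.delegate_kernel" operation runs a specified TF op via the TFE C API. As a rough, hypothetical illustration of what such an invocation looks like (assuming tensorflow/c/eager/c_api.h; the choice of op "AddV2" and the helper name are not taken from the dialect definition), an eager op call follows this pattern:

      #include "tensorflow/c/eager/c_api.h"
      #include "tensorflow/c/tf_status.h"

      // Runs AddV2(a, b) eagerly and returns the result handle, or nullptr on error.
      TFE_TensorHandle* RunAddV2(TFE_Context* ctx, TFE_TensorHandle* a,
                                 TFE_TensorHandle* b, TF_Status* status) {
        TFE_Op* op = TFE_NewOp(ctx, "AddV2", status);
        if (TF_GetCode(status) != TF_OK) return nullptr;
        TFE_OpAddInput(op, a, status);
        TFE_OpAddInput(op, b, status);

        // TFE_Execute fills retvals with the op's outputs.
        TFE_TensorHandle* retvals[1] = {nullptr};
        int num_retvals = 1;
        TFE_Execute(op, retvals, &num_retvals, status);
        TFE_DeleteOp(op);
        return TF_GetCode(status) == TF_OK ? retvals[0] : nullptr;
      }
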
  9. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/custom_aggregator_op.cc

          c->set_output(3, c->MakeShape({num_bins_attr->i()}));
    
          return absl::OkStatus();
        });
    
    class CustomAggregatorOp : public OpKernel {
     public:
      explicit CustomAggregatorOp(OpKernelConstruction* context)
          : OpKernel(context) {
        OP_REQUIRES_OK(context, context->GetAttr("id", &id_));
    
        int calibration_method_value;
        int num_bins;
        float min_percentile;
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.2K bytes
  10. tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.td

         TFRT_CostFunctionInterface, TFRT_FixedCost<1>]> {
      let summary = "Copy the CPU fallback tensor if it is small";
    
      let description = [{
        This kernel performs deep copy on the input tensor if it is small, to avoid
        atomic contention on its refcount.
    
    Note that this kernel always creates a new AsyncValue for each result to
        avoid atomic contention on AsyncValue's refcount.
      }];
    
      let arguments = (ins
    - Last Modified: Thu Apr 18 15:01:21 UTC 2024
    - 15.8K bytes