- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 216 for _kernel (0.16 sec)
-
tensorflow/compiler/jit/xla_device.cc
}); } } void XlaDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) { VLOG(2) << "XlaDevice::Compute " << op_kernel->name() << ":" << op_kernel->type_string(); ShowXlaDeviceDeprecationWarning(jit_device_name_.type_string()); op_kernel->Compute(context); } void XlaDevice::ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc
std::optional<absl::string_view> tf_program = std::nullopt, double error_tolerance = 0.1) { // Expected result is calculated by evaluating using TF kernels. In some // cases, TF kernel behaves differently from lowered graph (e.g. Hybrid // ops). So we optionally use a different graph to calculate the expected // result. TF_ASSERT_OK_AND_ASSIGN( auto expected,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 01:03:21 UTC 2024 - 35.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.td
TFRT_CostFunctionInterface, TFRT_FixedCost<1>]> { let summary = "Copy the CPU fallback tensor if it is small"; let description = [{ This kernel performs deep copy on the input tensor if it is small, to avoid atomic contention on its refcount. Note that this kernel always creates a new AsyncValue for each result to avoid atomic contention on AsyncValue's refcount. }]; let arguments = (ins
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 15:01:21 UTC 2024 - 15.8K bytes - Viewed (0) -
tensorflow/compiler/jit/compilability_check_util.h
// e.g. `Add`, that expects its inputs in device memory. Here is how it // works now. // First, what do we mean by "op expects an input in XYZ memory"? // There are two types of "ops" here: the tf2xla kernel and the HLO // computation it builds. The tf2xla kernel needs to retrieve the actual // numeric value of the compile-time constant tensors, so it really expects // them to be in host memory. However, for other inputs, it refers to them
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 14.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.td
} def BatchFunctionOp : TensorflowMlrt_Op<"batch_function", [Pure]> { let summary = "Fallback ExecuteOp specialized for tf.BatchFunction."; let description = [{ This kernel executes a variant tf.BatchFunction kernel that supports having the `f` attribute as a bytecode function. Example: %res = tf_mlrt.batch_function(%input, %captured_input) { device = "/device:CPU:0",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 20:44:15 UTC 2024 - 13.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc
#include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 13 01:31:23 UTC 2024 - 11.4K bytes - Viewed (0) -
pkg/proxy/ipvs/README.md
``` kernel/net/ipv4/netfilter/nf_conntrack_ipv4.ko kernel/net/netfilter/ipvs/ip_vs.ko kernel/net/netfilter/ipvs/ip_vs_rr.ko kernel/net/netfilter/ipvs/ip_vs_wrr.ko kernel/net/netfilter/ipvs/ip_vs_lc.ko kernel/net/netfilter/ipvs/ip_vs_wlc.ko kernel/net/netfilter/ipvs/ip_vs_fo.ko kernel/net/netfilter/ipvs/ip_vs_ovf.ko kernel/net/netfilter/ipvs/ip_vs_lblc.ko kernel/net/netfilter/ipvs/ip_vs_lblcr.ko
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Oct 20 02:32:29 UTC 2021 - 18.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td
// derived from other ports by the target specification of the kernel. def Quantization_QuantizeRegionOp : Quantization_Op<"region", [ Pure, IsolatedFromAbove, SingleBlockImplicitTerminator<"ReturnOp">]> { let summary = [{ The `region` operation wraps high-precision ops as a logical low-precision quantized kernel. }]; let arguments = (ins Variadic<AnyType>:$inputs,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 03:10:59 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.td
// derived from other ports by the target specification of the kernel. def quantfork_QuantizeRegionOp : quantfork_Op<"region", [ Pure, IsolatedFromAbove, SingleBlockImplicitTerminator<"ReturnOp">]> { let summary = [{ The `region` operation wraps high-precision ops as a logical low-precision quantized kernel. }]; let arguments = (ins Variadic<AnyType>:$inputs,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 13 12:46:08 UTC 2022 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.cc
return absl::OkStatus(); }); // Register the corresponding fake op kernel. const char* node_name = opdef.name().c_str(); const char* op_name = opdef.name().c_str(); const char* device_name = "CPU"; static auto fake_compute_func = [](void* kernel, TF_OpKernelContext* ctx) { }; TF_KernelBuilder* builder =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 19.2K bytes - Viewed (0)