- Sort: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 371 for _kernel (0.25 sec)
-
src/net/tcpsock_posix.go
// at addr2, without either machine executing Listen. If laddr == nil, // it means we want the kernel to pick an appropriate originating local // address. Some Linux kernels cycle blindly through a fixed range of // local ports, regardless of destination port. If a kernel happens to // pick local port 50001 as the source for a Dial("tcp", "", "localhost:50001"),
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 16:54:32 UTC 2024 - 6.3K bytes - Viewed (0) -
src/net/ipsock_posix.go
// and kernel configuration. // // Should we try to use the IPv4 socket interface if we're only // dealing with IPv4 sockets? As long as the host system understands // IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to // the IPv6 interface. That simplifies our code and is most // general. Unfortunately, we need to run on kernels built without
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/c/c_test.c
TF_DeleteStatus(s); return NULL; } // A compute function. This will never actually get called in this test, it's // just nice to know that it compiles. void compute(void* kernel, TF_OpKernelContext* ctx) { TF_Tensor* input; TF_Status* s = TF_NewStatus(); TF_GetInput(ctx, 0, &input, s); TF_DeleteTensor(input); TF_DeleteStatus(s); } // Exercises tensorflow's C API.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:50:35 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.cc
}); } } void XlaDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) { VLOG(2) << "XlaDevice::Compute " << op_kernel->name() << ":" << op_kernel->type_string(); ShowXlaDeviceDeprecationWarning(jit_device_name_.type_string()); op_kernel->Compute(context); } void XlaDevice::ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc
std::optional<absl::string_view> tf_program = std::nullopt, double error_tolerance = 0.1) { // Expected result is calculated by evaluating using TF kernels. In some // cases, TF kernel behaves differently from lowered graph (e.g. Hybrid // ops). So we optionally use a different graph to calculate the expected // result. TF_ASSERT_OK_AND_ASSIGN( auto expected,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 01:03:21 UTC 2024 - 35.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc
.Attr("T: type") .Attr("enabled: bool") .Attr("func_name: string") .Attr("node_name: string") .SetIsStateful(); class DumpTensorOp : public OpKernel { public: explicit DumpTensorOp(OpKernelConstruction* ctx) : OpKernel(ctx) { string log_dir_path; string file_name; string func_name; string node_name; OP_REQUIRES_OK(ctx, ctx->GetAttr("log_dir_path", &log_dir_path));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 03:12:17 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_hashtables.cc
} for (auto hashtable : hashtables) { auto key_dtype = hashtable.getKeyDtype(); auto value_dtype = hashtable.getValueDtype(); // Only allow string -> int64 and int64 -> string mappings due to kernel // capability. if (!((mlir::isa<TF::StringType>(key_dtype) && mlir::isa<IntegerType>(value_dtype) && mlir::cast<IntegerType>(value_dtype).getWidth() == 64) ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cpu_device.cc
return status; } devices->push_back(std::move(device)); return absl::OkStatus(); } REGISTER_LOCAL_DEVICE_FACTORY(DEVICE_XLA_CPU, XlaCpuDeviceFactory); // Kernel registrations constexpr std::array<DataType, 18> kAllXlaCpuTypes = { {DT_UINT8, DT_QUINT8, DT_UINT16, DT_INT8, DT_QINT8, DT_INT16, DT_INT32, DT_QINT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/custom_aggregator_op.cc
c->set_output(3, c->MakeShape({num_bins_attr->i()})); return absl::OkStatus(); }); class CustomAggregatorOp : public OpKernel { public: explicit CustomAggregatorOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("id", &id_)); int calibration_method_value; int num_bins; float min_percentile;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.td
TFRT_CostFunctionInterface, TFRT_FixedCost<1>]> { let summary = "Copy the CPU fallback tensor if it is small"; let description = [{ This kernel performs deep copy on the input tensor if it is small, to avoid atomic contention on its refcount. Note that this kernel always create a new AsyncValue for each result to avoid atomic contention on AsyncValue's refcount. }]; let arguments = (ins
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 15:01:21 UTC 2024 - 15.8K bytes - Viewed (0)