- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 295 for _kernel (0.77 sec)
-
src/runtime/vdso_freebsd.go
// license that can be found in the LICENSE file. //go:build freebsd package runtime import ( "internal/runtime/atomic" "unsafe" ) const _VDSO_TH_NUM = 4 // defined in <sys/vdso.h> #ifdef _KERNEL var timekeepSharedPage *vdsoTimekeep //go:nosplit func (bt *bintime) Add(bt2 *bintime) { u := bt.frac bt.frac += bt2.frac if u > bt.frac { bt.sec++ } bt.sec += bt2.sec }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 2.4K bytes - Viewed (0) -
tensorflow/cc/framework/cc_ops_test.cc
auto add = Add(root.WithKernelLabel("AddWithKernelLabel"), 1.0f, 2.0f); TF_EXPECT_OK(root.status()); AttrSlice attrs = add.z.op().node()->attrs(); const auto* kernel_attr = attrs.Find("_kernel"); ASSERT_TRUE(kernel_attr); TF_EXPECT_OK(AttrValueHasType(*kernel_attr, "string")); EXPECT_EQ(kernel_attr->s(), "AddWithKernelLabel"); } TEST(CCOpTest, ColocateWith) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 15 15:13:38 UTC 2023 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_kernel_creator.cc
*kernel = std::make_unique<XlaLocalLaunchBase>( &construction, constant_arg_indices, resource_arg_indices, function, /*has_ref_vars=*/false); return s; } Status XlaKernelCreator::CreateKernel( FunctionLibraryRuntime* flr, const std::shared_ptr<const NodeProperties>& props, std::unique_ptr<OpKernel>* kernel) const {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 22:24:01 UTC 2023 - 4.4K bytes - Viewed (0) -
tensorflow/c/kernels/summary_op_test.cc
std::unique_ptr<OpKernel> kernel = CreateOpKernel(DeviceType(DEVICE_CPU), nullptr, nullptr, def, 1, &status); ASSERT_TRUE(status.ok()) << status.ToString(); OpKernelContext::Params params; DummyDevice dummy_device(nullptr); params.device = &dummy_device; params.op_kernel = kernel.get(); AllocatorAttributes alloc_attrs; params.output_attr_array = &alloc_attrs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jul 18 15:10:51 UTC 2022 - 6.7K bytes - Viewed (0) -
tensorflow/c/kernels/bitcast_op_test.cc
std::unique_ptr<OpKernel> kernel = CreateOpKernel(DeviceType(DEVICE_CPU), nullptr, nullptr, def, 1, &status); ASSERT_TRUE(status.ok()) << status.ToString(); OpKernelContext::Params params; DummyDevice dummy_device(nullptr); params.device = &dummy_device; params.op_kernel = kernel.get(); gtl::InlinedVector<TensorValue, 4> inputs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jul 18 15:10:51 UTC 2022 - 5.5K bytes - Viewed (0) -
tensorflow/c/kernels/summary_op.cc
} ~Params() { TF_DeleteStatus(status); TF_DeleteTensor(tags); TF_DeleteTensor(values); } }; // dummy functions used for kernel registration void* ScalarSummaryOp_Create(TF_OpKernelConstruction* ctx) { return nullptr; } void ScalarSummaryOp_Delete(void* kernel) {} // Helper functions for compute method bool IsSameSize(TF_Tensor* tensor1, TF_Tensor* tensor2);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.h
// It does not have corresponding OpDef because it is never present // in the GraphDef. // Currently, it is used by eager runtime. FunctionLibraryRuntime creates // this kernel when asked to create a kernel for an XLA-compiled function. // // `has_ref_vars`: whether the input computation can have reference variables. // TODO(cheshire): instead derive this information from the input graph.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 23:44:26 UTC 2023 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/device_target.h
protected: // Adds the kernel spec with the custom scale function for the kernel. LogicalResult RegisterKernel(llvm::StringRef kernel, const KernelSpecs::Signature& signature, const ScaleFn& fn, const ScaleDecomposeFn& dfn); // Adds the kernel spec with the scale constraint type for the kernel. LogicalResult RegisterKernel(llvm::StringRef kernel,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/README.md
These ops can also be composite ops. * (Performance) User defines a custom kernel for a regular structure (i.e. LSTM), but it is hard to add the logic to fuse the individual ops to target this kernel in the inference graph. * *Solution*: The user should define a new TF op, which corresponds to the fused kernel, with composition, and use this op to build the model for
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
src/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
// When this happens, we have two options. If the Linux kernel is new // enough (4.11+), we can read the arm64 registers directly which'll // trap into the kernel and then return back to userspace. // // But on older kernels, such as Linux 4.4.180 as used on many Synology // devices, calling readARM64Registers (specifically getisar0) will // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo // instead. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 16:12:58 UTC 2024 - 3.4K bytes - Viewed (0)