Results 1 - 10 of 13 for compute_op (0.16 sec)
- tensorflow/compiler/mlir/tfr/python/test_utils.py
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class OpsDefsTest(test.TestCase):
  """Test utils."""

  def _assertOpAndComposite(self, vars_, compute_op, compute_composite, kwargs,
                            op_kwargs=None):
    if op_kwargs is None:
      op_kwargs = kwargs
    if test_util.IsMklEnabled():
      self.skipTest("Not compatible with oneDNN custom ops.")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 02 18:32:17 UTC 2023 - 1.8K bytes - Viewed (0)
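_assertOpAndComposite above takes the op under test and an equivalent "composite" implementation and compares them on the same inputs. A minimal usage sketch, assuming the test runs inside the TensorFlow source tree where this module is importable; the AddV2/composite pairing below is illustrative only, not taken from the indexed file:

import tensorflow as tf

# Assumption: importable when running under the TensorFlow source tree.
from tensorflow.compiler.mlir.tfr.python import test_utils


def composite_add(x, y):
  # The "composite": the same computation expressed with existing TF ops.
  return tf.add(x, y)


class AddOpsDefsTest(test_utils.OpsDefsTest):

  def test_add(self):
    x = tf.constant([1.0, 2.0])
    y = tf.constant([3.0, 4.0])
    # Compares the registered op against the composite on identical inputs.
    self._assertOpAndComposite([x, y], tf.raw_ops.AddV2, composite_add,
                               kwargs={'x': x, 'y': y})


if __name__ == '__main__':
  tf.test.main()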
- tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/batch_function_deduplicate.mlir
  func.return %arg0, %arg1 : tensor<?x?xi32>, tensor<?x?xi32>
}

// Batch function in batch_1 uses the same shared_name as the one in batch_0,
// so compute_1 is deduped, and compute_0 will be used here.
// CHECK-LABEL: func private @batch_1
// CHECK: f = @compute_0
// CHECK-NOT: f = @compute_1
func.func private @batch_1(%arg0: tensor<?x?xi32>) -> tensor<*xi32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 14 15:35:49 UTC 2023 - 3.5K bytes - Viewed (0)
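The CHECK lines above encode the deduplication contract: batch functions carrying the same shared_name collapse onto one compute function, and later references are rewritten to the surviving symbol. A small Python sketch of that bookkeeping, using an illustrative dict-based stand-in for the MLIR ops the real pass walks:

def dedupe_by_shared_name(batch_ops):
  """Rewrites each batch op's callee to the first callee seen for its shared_name.

  `batch_ops` is a list of dicts with 'shared_name' and 'callee' keys; this is
  only an illustrative data model, not the pass's actual IR types.
  """
  surviving_callee = {}  # shared_name -> first callee encountered
  for op in batch_ops:
    name = op['shared_name']
    if name not in surviving_callee:
      surviving_callee[name] = op['callee']  # e.g. '@compute_0' survives
    op['callee'] = surviving_callee[name]    # '@compute_1' is rewritten
  return batch_ops


# Mirrors the test: batch_0 and batch_1 share a shared_name, so both end up
# calling @compute_0 and @compute_1 becomes unreferenced.
ops = [
    {'shared_name': 'batch', 'callee': '@compute_0'},
    {'shared_name': 'batch', 'callee': '@compute_1'},
]
print(dedupe_by_shared_name(ops))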
- tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/batch_function_deduplicate_failed.mlir
  func.return %0#0 : tensor<*xi32>
}

// compute_3 has different argument types from compute_1, thus error is reported.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 14 15:35:49 UTC 2023 - 2.1K bytes - Viewed (0)
- tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc
  }

  auto compile_op = builder->create<TF::_TPUCompileMlirOp>(
      cluster_func.getLoc(),
      /*compilation_status=*/compilation_status_type, /*program=*/
      llvm::SmallVector<Type, 8>(num_cores_per_replica, program_type),
      compile_op_operands, txt_module, txt_metadata);

  return tensorflow::WrapOpInLaunch(builder, compile_op.getLoc(), compile_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 29.7K bytes - Viewed (0)
- tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_program_key.cc
  Operation* tpu_compile_successor = nullptr;
  func_op->walk([&](TF::_TPUCompileMlirOp compile_op) {
    if (compile_op->getParentOp() == nullptr ||
        !isa<tf_device::LaunchOp>(compile_op->getParentOp()))
      return WalkResult::advance();
    Operation* compile_launch_op = compile_op->getParentOp();

    if (compile_launch_op->getBlock() == preprocess_or_launch->getBlock() &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.4K bytes - Viewed (0)
- .bazelrc
# See https://developer.nvidia.com/cuda-gpus#compute
# `compute_XY` enables PTX embedding in addition to SASS. PTX
# is forward compatible beyond the current compute capability major
# release while SASS is only forward compatible inside the current
# major release. Example: sm_80 kernels can run on sm_89 GPUs but
# not on sm_90 GPUs. compute_80 kernels though can also run on sm_90 GPUs.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 17:12:54 UTC 2024 - 52.9K bytes - Viewed (0)
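The .bazelrc comment above states the compatibility rule in prose: SASS (sm_XY) kernels only run forward within the same major compute capability, while PTX (compute_XY) can be JIT-compiled forward onto newer majors. A hedged sketch of that rule as a predicate, assuming capability strings of the form sm_80 / compute_80:

def kernel_runs_on(built_for: str, gpu_sm: int) -> bool:
  """Applies the forward-compatibility rule described in the .bazelrc comment.

  built_for: target the kernel was built for, e.g. 'sm_80' or 'compute_80'.
  gpu_sm:    the GPU's compute capability as an integer, e.g. 89 for sm_89.
  Illustrative sketch only; not how Bazel or nvcc actually check this.
  """
  kind, _, version = built_for.partition('_')
  version = int(version)
  if kind == 'sm':
    # SASS: forward compatible only inside the same major release.
    return gpu_sm >= version and gpu_sm // 10 == version // 10
  if kind == 'compute':
    # PTX: forward compatible beyond the current major release.
    return gpu_sm >= version
  raise ValueError(f'unknown target {built_for!r}')


# Matches the examples in the comment above:
assert kernel_runs_on('sm_80', 89)       # sm_80 kernels can run on sm_89 GPUs
assert not kernel_runs_on('sm_80', 90)   # but not on sm_90 GPUs
assert kernel_runs_on('compute_80', 90)  # compute_80 kernels also run on sm_90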
- tensorflow/c/experimental/saved_model/internal/saved_model_api_test.cc
  TF_SavedModel* saved_model =
      TF_LoadSavedModel(model_dir.c_str(), ctx, status);
  EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);

  TF_ConcreteFunction* compute_fn =
      TF_GetSavedModelConcreteFunction(saved_model, "compute", status);
  EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);

  std::vector<TFE_TensorHandle*> compute_fn_inputs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 08:08:45 UTC 2024 - 21.3K bytes - Viewed (0)
- src/cmd/link/internal/ppc64/asm.go
		o1 &^= 0x3ffff
		o2 &^= 0x0ffff
		o1 |= computePrefix34HI(t)
		o2 |= computeLO(int32(t))
	case objabi.R_ADDRPOWER_D34:
		o1 &^= 0x3ffff
		o2 &^= 0x0ffff
		o1 |= computePrefix34HI(t)
		o2 |= computeLO(int32(t))
	case objabi.R_ADDRPOWER:
		o1 &^= 0xffff
		o2 &^= 0xffff
		o1 |= computeHA(int32(t))
		o2 |= computeLO(int32(t))
	case objabi.R_ADDRPOWER_DS:
		o1 &^= 0xffff
		o2 &^= 0xfffc
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 19 20:54:08 UTC 2024 - 63.7K bytes - Viewed (0)
- tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
  TF::TPUCompilationResultOp compile_op = nullptr;
  for (auto op : func.getRegion().getOps<TF::TPUCompilationResultOp>()) {
    if (compile_op == nullptr) {
      compile_op = op;
    } else {
      ops_to_erase.insert(op);
    }
  }

  // If there's no outside compilation, we can exit early because this isn't
  // a TPU function.
  if (compile_op == nullptr) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0)
- configure.py
        'you want to build with.\nYou can find the compute capability of your '
        'device at: https://developer.nvidia.com/cuda-gpus. Each capability '
        'can be specified as "x.y" or "compute_xy" to include both virtual and'
        ' binary GPU code, or as "sm_xy" to only include the binary '
        'code.\nPlease note that each additional compute capability '
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jun 10 04:32:44 UTC 2024 - 53.8K bytes - Viewed (0)
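The configure.py prompt above accepts each capability as "x.y", "compute_xy", or "sm_xy". An illustrative sketch (not configure.py's own parsing code) of how those spellings map to the GPU code targets they imply:

def expand_capability(spec: str) -> list:
  """Expands one user-supplied capability into the code targets it implies.

  Per the prompt text: "x.y" and "compute_xy" include both virtual (PTX) and
  binary (SASS) code; "sm_xy" includes only the binary code. Illustrative
  helper only, not configure.py's actual implementation.
  """
  if spec.startswith('sm_'):
    return [spec]                          # binary code only
  if spec.startswith('compute_'):
    xy = spec[len('compute_'):]
  else:
    major, _, minor = spec.partition('.')  # "7.5" -> "75"
    xy = major + minor
  return ['sm_' + xy, 'compute_' + xy]     # binary + virtual (PTX)


print(expand_capability('7.5'))         # ['sm_75', 'compute_75']
print(expand_capability('compute_75'))  # ['sm_75', 'compute_75']
print(expand_capability('sm_75'))       # ['sm_75']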