- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 1,177 for kDevice (0.19 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/colocate_tpu_copy_with_dynamic_shape.cc
auto device = op->getAttrOfType<StringAttr>(kDevice); for (auto *operand : operands) propagateIfChanged(operand, operand->SetDevice(device)); } else { // Propagate device through other ops. These ops might have their // own device annotation, but that's fine. We only care about // where the TPUExecute ops live. StringAttr device; for (const Device *d : results) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 23 00:30:27 UTC 2023 - 5.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/target_annotation.cc
// TODO(b/177376459): Update if needed to make testing easy. if (!module_) { for (const auto& device : device_specs) { auto* hardware = this->GetTargetHardware(device); if (hardware == nullptr) continue; if (hardware->IsOpSupported(op)) { SetAnnotation(op, kDevice, device, builder); device_is_set = true; break; } } } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 19:32:06 UTC 2023 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h
return name; } // Get the target annotation from the op. inline std::optional<std::string> GetTargetAnnotation(Operation* op) { auto device = op->getAttrOfType<StringAttr>(kDevice); if (device == nullptr || device.getValue().empty()) return std::nullopt; return GetCanonicalHardwareName(device.getValue().str()); } // Get inference type attribute from the operation if available.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc
// `{ tac.device = "GPU", tac.inference_type = "FLOAT"}` to a function // with the matching attributes. Assumed is that device type "CPU" // is the only device that is allowed to call other devices. I.e. ancestors of a // "CPU" `Operation` may only be `Operations` without a device or other "CPU" // `Operations`. Implied is that "CPU" ops may contain subgraphs of different
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc
for (const auto& device : devices) { if (inference_type == QUANTIZED_INT8) { all_device_inference_types.push_back({device, QUANTIZED_INT8}); } else if (inference_type == QUANTIZED_UINT8) { all_device_inference_types.push_back({device, QUANTIZED_UINT8}); } // We will always enable float. all_device_inference_types.push_back({device, FLOAT}); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_sequencing.cc
// TODO(bfontain): Check for other attributes. replicated_output->setAttr(kDevice, builder.getStringAttr("")); TF::TPUReplicatedInputOp input = builder.create<TF::TPUReplicatedInputOp>( op->getLoc(), result.getType(), replicated_output.getResults()); input->setAttr(kDevice, builder.getStringAttr("")); mlir::Value new_value = input.getOutput();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 39.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc
return std::nullopt; if (!HasValidHardwareTarget(op)) return std::nullopt; auto device = op->getAttrOfType<mlir::StringAttr>(mlir::TFL::tac::kDevice); if (device == nullptr) return std::nullopt; llvm::StringRef device_name_str = device.getValue(); return device_name_str.str(); } std::optional<std::vector<float>> GetPerDeviceCosts(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 06:11:34 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
// TODO(bfontain): Check for other attributes. replicated_output->setAttr(kDevice, builder.getStringAttr("")); TF::TPUReplicatedInputOp input = builder.create<TF::TPUReplicatedInputOp>( op->getLoc(), result.getType(), replicated_output.getResults()); input->setAttr(kDevice, builder.getStringAttr("")); mlir::Value new_value = input.getOutput();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc
// Set interface_name & target to the call_op as well. new_call->setAttr(kInterfaceNameAttr, builder->getStringAttr(interface_name)); new_call->setAttr( kDevice, builder->getStringAttr(preferred_inference_device_type.hardware)); new_call->setAttr( kInferenceType, builder->getStringAttr(GetInferenceString(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 24 15:10:02 UTC 2022 - 19.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/partial-device-name.pbtxt
op: "Add" input: "input0" input: "input1" # If device type or id doesn't exist, assign a default one (device:CPU:0). device: "/job:localhost/replica:0/task:0" attr { key: "T" value { type: DT_INT32 } } } node { name: "Mul" op: "Mul" input: "Add" input: "Add" # Empty device name should be kept untouched. device: "" attr { key: "T" value {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 26 20:48:36 UTC 2021 - 1.8K bytes - Viewed (0)