- Sort by: Score
- Results per page: 10
- Languages All
Results 31 - 40 of 54 for tpu0 (0.15 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc
func.getLoc(), op.getOperandTypes(), /*dynamic_key=*/dynamic_key, op.getSendKeyAttr(), /*device_ordinal=*/rewriter.getI64IntegerAttr(0), rewriter.getStringAttr("TPU")); for (auto result : llvm::zip(cloned_func.getArguments(), recv_at_host->getResults())) { std::get<0>(result).replaceAllUsesWith(std::get<1>(result)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc
auto status_or_device_coodinates = tensorflow::GetDeviceCoordinates(device_assignment_attr); if (!status_or_device_coodinates.ok()) return cluster.emitError() << "error in fetching tpu device coordinates: " << status_or_device_coodinates.status().message(); status_or_tpu_device_assignment = tensorflow::GetTPUCompilationAndExecutionDevices(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 18:52:07 UTC 2024 - 13.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/mark_ops_for_outside_compilation.cc
RewritePatternSet* patterns) { for (auto op : context->getRegisteredOperations()) op.getCanonicalizationPatterns(*patterns, context); } // Adds the list of ops that are supported on TPU through constant folding which // may depend on the inputs shapes not known at this point. Such ops may not // have any legalization or canonicalization patterns but shouldn't be marked // for outside compilation. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
quantization_config.mutable_static_range_ptq_preset() ->set_enable_per_channel_quantized_weight( quantization_options.enable_per_channel_quantization()); // When targeting server TPUs quantized types should be unpacked into // integer ops. quantization_config.mutable_pipeline_config()->set_unpack_quantized_types( true); *quantization_config.mutable_debugger_config() =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
// Get the device context given the index. absl::StatusOr<DeviceContext*> GetDeviceContextWithIndex(int index); // Instructs this XlaDevice to set a AcceleratorDeviceInfo, which holds extra // information for GPU and TPU devices. Status UseAcceleratorDeviceInfo() TF_LOCKS_EXCLUDED(mu_); // Instructs this XlaDevice to return 'sync_on_completion' for // AllowsSyncOnCompletion().
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/BUILD
"//tensorflow/compiler/mlir/quantization/...", "//tensorflow/compiler/mlir/tf2xla/transforms/...", "//tensorflow/lite/...", "//third_party/cloud_tpu/inference_converter/...", # TPU Inference Converter V1 ] + internal_visibility_allowlist(), ) package( # copybara:uncomment default_applicable_licenses = ["@stablehlo//:license"], default_visibility = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 02:59:01 UTC 2024 - 28.3K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.cc
#include "absl/strings/strip.h" #include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h" #include "xla/parse_flags_from_env.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/tpu/kernels/sparse_core_xla_flags_defaults.h" #include "tensorflow/core/util/command_line_flags.h" namespace tensorflow { namespace { BuildXlaOpsPassFlags* build_ops_flags;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc
// only called from one cluster. Here, we choose to fix the all-funcs-one-use // invariant right before it's needed, not after it's been broken. pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass()); // Run TPU cluster cleanup attributes so ops with no outside compiled // attribute have no host device attribute. pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 16:09:14 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.cc
// valid to convert a `Cast` to an `Identity`. The `_output_shapes` attribute of // the `Cast` will be preserved. This transform is needed for the graph to be // executed on TPU or GPU devices, which do not have `Cast` registered as a // runtime OpKernel. void RemoveIdentityCast(NodeDef* node_def) { auto attr = node_def->mutable_attr(); if (node_def->op() == "Cast" && attr->contains("SrcT") &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 11.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/mark_ops_for_outside_compilation.mlir
func.func @unsupported_op_missing_soft_placement_attribute() -> tensor<i32> { %0 = "tf_device.cluster"() ({ // expected-error@+1 {{'tf.UnsupportedOp' op isn't compilable for TPU device}} %1 = "tf.UnsupportedOp"() {value = dense<1> : tensor<i32>} : () -> tensor<i32> %2 = "tf.Identity"(%1) : (tensor<i32>) -> tensor<i32> tf_device.return %2 : tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 16:22:32 UTC 2024 - 29.5K bytes - Viewed (0)