- Sort by: Score
- Results per page: 10
- Languages All
Results 51 - 60 of 71 for tpu0 (0.04 sec)
-
tensorflow/compiler/jit/flags.cc
#include "absl/strings/strip.h" #include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h" #include "xla/parse_flags_from_env.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/tpu/kernels/sparse_core_xla_flags_defaults.h" #include "tensorflow/core/util/command_line_flags.h" namespace tensorflow { namespace { BuildXlaOpsPassFlags* build_ops_flags;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compiler_options_util_test.cc
#include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using XlaDeviceExecutablePersistor =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc
// only called from one cluster. Here, we choose to fix the all-funcs-one-use // invariant right before it's needed, not after it's been broken. pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass()); // Run TPU cluster cleanup attributes so ops with no outside compiled // attribute have no host device attribute. pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 16:09:14 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.cc
// valid to convert a `Cast` to an `Identity`. The `_output_shapes` attribute of // the `Cast` will be preserved. This transform is needed for the graph to be // executed on TPU or GPU devices, which do not have `Cast` registered as a // runtime OpKernel. void RemoveIdentityCast(NodeDef* node_def) { auto attr = node_def->mutable_attr(); if (node_def->op() == "Cast" && attr->contains("SrcT") &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 11.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/mark_ops_for_outside_compilation.mlir
func.func @unsupported_op_missing_soft_placement_attribute() -> tensor<i32> { %0 = "tf_device.cluster"() ({ // expected-error@+1 {{'tf.UnsupportedOp' op isn't compilable for TPU device}} %1 = "tf.UnsupportedOp"() {value = dense<1> : tensor<i32>} : () -> tensor<i32> %2 = "tf.Identity"(%1) : (tensor<i32>) -> tensor<i32> tf_device.return %2 : tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 16:22:32 UTC 2024 - 29.5K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc
return graph; } TEST(EncapsulateXlaComputations, DeterministicEncapsulate) { // Test that control edge insertion order doesn't affect the cache key // (cluster name) generated by TPU encapsulate pass. auto get_serialized_graph = [](bool control_input_reversed, bool operand_reversed) -> string { FunctionLibraryDefinition flib_def(OpRegistry::Global(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 18:03:15 UTC 2023 - 14.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
// when the producer is a non-XLA TF GPU kernel or function (e.g. // tf.matmul). // // 3. AsyncValueTensor, containing a PjRtBuffer. This is the legacy mode // and certain device type (e.g. TPU) still uses this path. AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor); if (use_pjrt_tensor_buffer) { if (av_tensor != nullptr) { return absl::InvalidArgumentError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
return stage == IrExportStage::HLO || stage == IrExportStage::HLO_NO_METADATA || stage == IrExportStage::HLO_SERIALIZED; }; // TODO(b/238830423): support GetCompilerIr on TFRT TPU device for stages // that requires compilation from HLO to executable. if (device_type != DEVICE_CPU && stream == nullptr && !is_tfrt_tpu_supported_stage(stage)) { return absl::InternalError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/duplicate_shape_determining_constants.cc
// call tree upwards and duplicates all constants found in the subtree in a // similar manner. // // This pass may be used to avoid placing shape-determining constants in the CPU // graph and pass them as arguments to the TPU graph (via `TPUPartitionedCall`). // If this happens, the XLA compiler cannot recognize such arguments as // constants and may result in an error. // // A set of predefined ops and operand indices is used to determine whether an
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 17.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_resource_partitioning.mlir
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tpu-resource-partition | FileCheck %s func.func private @computation(%arg0: tensor<i32>) -> tensor<i32> // CHECK-LABEL: func @read_write_resource // CHECK-SAME: ([[ARG0:%.+]]: tensor<!tf_type.resource<tensor<i32>>>, [[ARG1:%.+]]: tensor<!tf_type.resource<tensor<i32>>>) func.func @read_write_resource(%arg0: tensor<!tf_type.resource<tensor<i32>>>, %arg1: tensor<!tf_type.resource<tensor<i32>>>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 23 23:53:20 UTC 2024 - 15.7K bytes - Viewed (0)