- Sort by: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 54 for tpu0 (0.06 sec)
-
tensorflow/compiler/jit/xla_launch_util.cc
// when the producer is a non-XLA TF GPU kernel or function (e.g. // tf.matmul). // // 3. AsyncValueTensor, containing a PjRtBuffer. This is the legacy mode // and certain device type (e.g. TPU) still uses this path. AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor); if (use_pjrt_tensor_buffer) { if (av_tensor != nullptr) { return absl::InvalidArgumentError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
return stage == IrExportStage::HLO || stage == IrExportStage::HLO_NO_METADATA || stage == IrExportStage::HLO_SERIALIZED; }; // TODO(b/238830423): support GetCompilerIr on TFRT TPU device for stages // that requires compilation from HLO to executable. if (device_type != DEVICE_CPU && stream == nullptr && !is_tfrt_tpu_supported_stage(stage)) { return absl::InternalError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/duplicate_shape_determining_constants.cc
// call tree upwards and duplicates all constants found in the subtree in a // similar manner. // // This pass may be used to avoid placing shape-determining constants in the CPU // graph and pass them as arguments to the TPU graph (via `TPUPartitionedCall`). // If this happens, the XLA compiler cannot recognize such arguments as // constants and may result in an error. // // A set of predefined ops and operand indices is used to determine whether an
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 17.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_resource_partitioning.mlir
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tpu-resource-partition | FileCheck %s func.func private @computation(%arg0: tensor<i32>) -> tensor<i32> // CHECK-LABEL: func @read_write_resource // CHECK-SAME: ([[ARG0:%.+]]: tensor<!tf_type.resource<tensor<i32>>>, [[ARG1:%.+]]: tensor<!tf_type.resource<tensor<i32>>>) func.func @read_write_resource(%arg0: tensor<!tf_type.resource<tensor<i32>>>, %arg1: tensor<!tf_type.resource<tensor<i32>>>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 23 23:53:20 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc
#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/core_platform_payloads.pb.h" #include "tensorflow/core/tpu/tpu_defs.h" #include "tensorflow/core/util/debug_data_dumper.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { constexpr absl::string_view kGroupSizeAttrName =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 45.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/hoist_invariant_ops.mlir
func.return %1 : tensor<*xf32> } } // ----- module attributes {tf_saved_model.semantics} { // Test not hoisting in TPU functions. // CHECK-LABEL: func @_tfrt_resource_init // CHECK-NEXT: return // CHECK-LABEL: func private @func2 func.func private @func2(%arg: tensor<i1>) -> tensor<i32> { // CHECK-NOT: tf._TfrtGetResource
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 01 23:54:14 UTC 2024 - 18.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc
// Default version number for native serialization. constexpr int64_t kDefaultVersion = 9; // Platforms for XlaCallModuleOp. constexpr StringRef kPlatformCpu = "CPU"; constexpr StringRef kPlatformTpu = "TPU"; class ReplaceStablehloOpsInMainFunctionWithXlaCallModuleOpsPass : public impl:: ReplaceStablehloOpsInMainFunctionWithXlaCallModuleOpsPassBase<
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_merge_variables_with_execute.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #define DEBUG_TYPE "tf-tpu-merge-variables-with-execute" namespace mlir { namespace TFTPU { namespace { constexpr char kAliasingAttr[] = "tf.aliasing_output"; constexpr char kDeviceAttr[] = "device";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 17:52:11 UTC 2024 - 27K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
// Current supported ops between convolution input and the block arguments are // PadOp and CastOp. return GetInputBlockArgNum(conv2d.getInput()); } // Applies space to depth transform for the first convolution on TPU device. void HandleFirstConvolution(TF::Conv2DOp conv2d, int64_t block_size) { // Check if input and filter type are RankedTensorType. auto input_tensor_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 29.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
// handles empty computations. Another approach could be to drop empty // clusters within MLIR but that seems to trigger other failures but can be // considered again. // Old bridge only removes unsupported TPU types (only string for now) // during outside compilation extraction so this should be enough for // the parity. bool is_unsupported_type = mlir::isa<mlir::TF::StringType>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.4K bytes - Viewed (0)