- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 245 for tpu0 (0.09 sec)
-
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc
#include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h" #include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace tf2xla { namespace internal { using metrics::IncrementTfMlirBridgeSecondPhaseCounter; using metrics::MlirBridgeSecondPhaseMetric; using tpu::MlirToHloArgs; absl::StatusOr<XlaCompilationResult> LegalizeTfToHlo(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/convert_to_legacy_compile_and_replicate_attributes.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/mark_input_output_aliases.mlir
%1 = "tf.ReadVariableOp"(%arg1) : (!tf_res_f32) -> tensor<f32> %2 = "tf.ReadVariableOp"(%arg2) : (!tf_res_f32) -> tensor<f32> %device_output:2 = "tf_device.cluster_func"(%0, %1, %2) {_xla_compile_device_type = "TPU", _replication_info = "tpu", func = @device_func_0} : (tensor<i32>, tensor<f32>, tensor<f32>) -> (tensor<f32>, tensor<i32>) "tf.AssignVariableOp"(%arg1, %device_output#0) : (!tf_res_f32, tensor<f32>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 04:14:26 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h
absl::Status CompileTensorflowGraphToHlo( const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation, const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args, XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_funcs, const std::vector<tensorflow::TensorShape>& arg_shapes, std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:08:57 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace tf2xla { namespace internal { // Legalize the given MLIR module to XLA HLO using a combination of the MLIR // Bridge and XlaBuilder absl::StatusOr<XlaCompilationResult> LegalizeTfToHlo( const tpu::MlirToHloArgs& computation,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc
#include "tensorflow/core/platform/profile_utils/cpu_utils.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h" #include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h" #include "tensorflow/core/tpu/tpu_compile.h" #include "tsl/platform/error_logging.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/function/function.h
// If true, use ServingCoreSelector to pick TPU core. Otherwise, obtain core // location from assigned device name. // Currently we don't use core_selector for training use cases. bool tpu_use_core_selector = false; // If true, use BundledTransferToTpuOp to transfer variables and input tensors // to TPU. bool tpu_use_bundled_transfer = false; // If true, lower a TF op that's placed on TPU device to be executed with
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 04:50:20 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
#include "xla/shape.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { static FailureOr<std::vector<int64_t>> GetTPUInfeedLayoutFromAPI( RankedTensorType t) { // Call the TPU API to determine the right infeed layout. Note that // this can fail if we're not running on a TPU-enabled node.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h
// client - The Xla Compilation client. absl::StatusOr<tensorflow::XlaCompilationResult> LegalizeMlirToHlo( const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation, const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args, llvm::StringRef device_type, std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 07:32:57 UTC 2024 - 2.9K bytes - Viewed (0) -
istioctl/pkg/metrics/metrics.go
return time.Duration(val) * time.Millisecond } func printHeader(writer io.Writer) { w := tabwriter.NewWriter(writer, 13, 1, 2, ' ', tabwriter.AlignRight) _, _ = fmt.Fprintf(w, "%40s\tTOTAL RPS\tERROR RPS\tP50 LATENCY\tP90 LATENCY\tP99 LATENCY\t\n", "WORKLOAD") _ = w.Flush() } func printMetrics(writer io.Writer, wm workloadMetrics) { w := tabwriter.NewWriter(writer, 13, 1, 2, ' ', tabwriter.AlignRight)
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Sat Apr 13 05:23:38 UTC 2024 - 8.4K bytes - Viewed (0)