- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 45 for shardKey (0.36 sec)
-
tensorflow/cc/saved_model/metrics.cc
// should be equal among all checkpoints written per job. auto* checkpoint_size = monitoring::Counter<2>::New( "/tensorflow/core/checkpoint/write/checkpoint_size", "Size of checkpoint (.index and sharded data files), rounded to the " "nearest 100 MB.", "api_label", "filesize"); } // namespace // Counter that records how long it took to execute the checkpoint sharding // callback in microseconds.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jan 18 23:43:59 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
return mlir::failure(); shard_output_types.push_back(shard_type); full_output_types.push_back(output.getType()); } // Convert split sharded inputs to MANUAL sharded inputs. // common_split_sharding is the split sharding that is common to all inputs // and outputs. llvm::SmallVector<Value, 4> manual_inputs; manual_inputs.reserve(inputs.size());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/python/mlir.cc
#include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { // All the passes we will make available to Python by default. // TODO(tf): this should be sharded instead of being monolithic like that. static void RegisterPasses() { static bool unique_registration = [] { mlir::registerAllPasses(); mlir::registerTensorFlowPasses();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:16:49 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
} def TPUShardingIdentificationPass : Pass<"tf-tpu-sharding-identification", "ModuleOp"> { let summary = "Identifies and handles inputs/outputs of TPU computation that is " "sharded across logical cores."; let constructor = "tensorflow::tf2xla::internal::CreateTPUShardingIdentificationPass()"; let description = [{ Bubbles up sharding configuration from `cluster_func` regions into
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
src/runtime/HACKING.md
by type `p`. There are exactly `GOMAXPROCS` Ps. A P can be thought of like a CPU in the OS scheduler and the contents of the `p` type like per-CPU state. This is a good place to put state that needs to be sharded for efficiency, but doesn't need to be per-thread or per-goroutine. The scheduler's job is to match up a G (the code to execute), an M (where to execute it), and a P (the rights and resources to execute
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.9K bytes - Viewed (0) -
src/runtime/metrics.go
// runtime. This set of stats is grouped together because they // depend on each other in some way to make sense of the runtime's // current heap memory use. They're also sharded across Ps, so it // makes sense to grab them all at once. type heapStatsAggregate struct { heapStatsDelta // Derived from values in heapStatsDelta. // inObjects is the bytes of memory occupied by objects,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 26K bytes - Viewed (0) -
tensorflow/cc/saved_model/testdata/half_plus_two_pbtxt/00000123/saved_model.pbtxt
producer: 23 } } saver_def { filename_tensor_name: "save/Const:0" save_tensor_name: "save/Identity:0" restore_op_name: "save/restore_all" max_to_keep: 5 sharded: true keep_checkpoint_every_n_hours: 10000.0 version: V2 } collection_def { key: "asset_filepaths" value { node_list { value: "Const:0" } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 26 01:10:27 UTC 2017 - 46.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc
for (mlir::TF::IdentityOp to_erase : erase_list) { to_erase->erase(); } return result_op; } // Return the cluster's per-replica result type, converting any full-shaped // tensor types into sharded-shaped ones if they're partitioned. llvm::SmallVector<Type, 8> GetClusterResultTypes( mlir::tf_device::ClusterOp cluster, const PartitionedClusterOutputMap& partitioned_outputs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 39.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
let hasVerifier = 1; } def TF_TPUPartitionedOutputOp : TF_Op<"TPUPartitionedOutput", [Pure]> { let summary = [{ An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned }]; let description = [{ outputs outside the XLA computation. }]; let arguments = (ins TF_Tensor:$inputs,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 90.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 172.9K bytes - Viewed (0)