- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 6 of 6 for batch_timeout_micros (0.25 sec)
-
tensorflow/compiler/mlir/tfrt/tests/reconfig_batch_op.mlir
} // CHECK-LABEL: func @main func.func @main(%arg0: tensor<1x3xf32>) -> tensor<*xf32> { // CHECK: "tf.BatchFunction" // CHECK-SAME: allowed_batch_sizes = [6] // CHECK-SAME: batch_timeout_micros = 100000 : i64 // CHECK-SAME: batching_queue = "" // CHECK-SAME: container = "" // CHECK-SAME: enable_large_batch_splitting = false // CHECK-SAME: max_batch_size = 6 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 17:38:34 UTC 2024 - 5.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/batch_function_lowering.mlir
// CHECK: tfrt_fallback_async.batch_function device("/device:CPU:0") @batched_function // CHECK-SAME: Tin = [f32] // CHECK-SAME: Tout = [f32] // CHECK-SAME: allowed_batch_sizes = [6] // CHECK-SAME: batch_timeout_micros = 100000 : i64 // CHECK-SAME: batching_queue = "" // CHECK-SAME: container = "" // CHECK-SAME: enable_large_batch_splitting = false // CHECK-SAME: max_batch_size = 6 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 00:18:59 UTC 2024 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/hoist_invariant_ops.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 01 23:54:14 UTC 2024 - 18.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir
// CHECK: [[y_future:%.*]] = tf_mlrt.batch_function // CHECK-SAME: f = @batched_function // CHECK-SAME: \22batch_function\22 %y = "tf.BatchFunction"(%x) { allowed_batch_sizes = [6], batch_timeout_micros = 100000 : i64, batching_queue = "", container = "", device = "/device:CPU:0", enable_large_batch_splitting = false, f = @batched_function, max_batch_size = 6 : i64, max_enqueued_batches = 10 : i64,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 20:44:15 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.td
%res = tfrt_fallback_async.batch_function device("/CPU:0") @f(%input, %captured_input) { num_batch_threads = 1, max_batch_size = 4, allowed_batch_sizes = [2 , 4], batch_timeout_micros = 50, container = "container", shared_name = "shared_name", batching_queue = "batching_queue", enable_large_batch_splitting = false, Tin = [f32],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 15:01:21 UTC 2024 - 15.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
captured_tensors=computation.captured_inputs, Tout=[o.type for o in computation.definition.signature.output_arg], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[3, 10], batching_queue="") ``` If more than one session.run call is simultaneously trying to compute `b`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0)