Results 1 - 10 of 13 for batch_function (0.22 sec)

  1. tensorflow/compiler/mlir/tfrt/tests/tfrt_fallback/batching_fallback.mlir

    // ... then the code should not crash.
    // CHECK-LABEL: Running 'test_batch_returns_multiple_refs'
    func.func @test_batch_returns_multiple_refs() -> !tfrt.chain {
      %ch0 = tfrt.new.chain
    
      %0 = tfrt_fallback_async.const_dense_tensor dense<[[1, 1], [1, 1]]> : tensor<2x2xi32>
    
      %1, %2 = tfrt_fallback_async.batch_function device("/device:CPU:0") @returns_multiple_refs (%0) {
    - Last Modified: Tue Jul 18 22:58:56 UTC 2023
    - 8.6K bytes
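    For context, a minimal sketch of the kind of callee this test exercises; the function name comes from the snippet above, but the body and types are assumptions, not the file's contents:

    // Hypothetical callee: returning its argument twice means the batch kernel
    // must hand out two references to one underlying tensor without crashing.
    func.func @returns_multiple_refs(%arg0: tensor<2x2xi32>) -> (tensor<2x2xi32>, tensor<2x2xi32>) {
      func.return %arg0, %arg0 : tensor<2x2xi32>, tensor<2x2xi32>
    }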
  2. tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc

        // ... does not implement the `CallOpInterface`, so it should be handled separately.
        if (auto batch_function = dyn_cast<TF::BatchFunctionOp>(op)) {
          // Propagate the analysis results from within the callee's body.
          PropagatePotentiallyWrittenUpFromCallee(batch_function.func().getRegion(),
                                                  batch_function.getOperands());
          return;
        }
    - Last Modified: Wed May 15 09:04:13 UTC 2024
    - 8K bytes
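    To make the special-casing concrete, a hedged sketch of a call site this analyzer walks; the callee @writes_arg and all attribute values are invented for illustration:

    // Hypothetical call site: writes made inside @writes_arg must be
    // propagated up so the resource %var counts as potentially written.
    %var = "tf.VarHandleOp"() {container = "", shared_name = "v"} : () -> tensor<!tf_type.resource<tensor<2xi32>>>
    %res = "tf.BatchFunction"(%var) {
        f = @writes_arg, num_batch_threads = 1 : i64, max_batch_size = 4 : i64,
        batch_timeout_micros = 1000 : i64, operandSegmentSizes = array<i32: 1, 0>
      } : (tensor<!tf_type.resource<tensor<2xi32>>>) -> tensor<*xi32>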
  3. tensorflow/compiler/mlir/tfrt/tests/batch_function_lowering.mlir

      %0 = "tf.VarHandleOp"() {device = "/device:CPU:0", container = "", shared_name = "variable"} : () -> tensor<!tf_type.resource<tensor<1x3xf32>>>
      // CHECK: tfrt_fallback_async.batch_function device("/device:CPU:0") @batched_function
      // CHECK-SAME: Tin = [f32]
      // CHECK-SAME: Tout = [f32]
      // CHECK-SAME: allowed_batch_sizes = [6]
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 2K bytes
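    Loosely reconstructed from the CHECK lines above, a hedged sketch of the input op this lowering would start from; beyond allowed_batch_sizes = [6] and the callee name, the attribute values and types are assumptions:

    // Hypothetical pre-lowering op; the pass is expected to rewrite it into
    // the tfrt_fallback_async.batch_function call checked above.
    %1 = "tf.BatchFunction"(%arg0, %0) {
        f = @batched_function, allowed_batch_sizes = [6],
        num_batch_threads = 1 : i64, max_batch_size = 6 : i64,
        batch_timeout_micros = 100 : i64, operandSegmentSizes = array<i32: 1, 1>
      } : (tensor<1x3xf32>, tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<*xf32>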
  4. tensorflow/compiler/mlir/tfrt/tests/batch_function_fallback_resource_variable_as_captured_tensor.mlir

          // CHECK: tfrt_fallback_async.batch_function device([[DEVICE:.*]]) @batched_func ([[BATCHED_FUNC_ARG:%.*]])
          // CHECK-SAME: Tcaptured = [!corert.resource]
          // CHECK-SAME: Tin = []
          // CHECK-SAME: Tout = [f32]
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 1.7K bytes
  5. tensorflow/compiler/mlir/tfrt/tests/tfrt_fallback/batch_function_fallback_benchmark_test.cc

    #include "tfrt/tensor/tensor.h"  // from @tf_runtime
    
    namespace tensorflow {
    namespace {
    
    // Creates a BEF file with a program that runs
    // tfrt_fallback_async.batch_function with an empty function that forwards
    // inputs to outputs.
    std::pair<tfrt::BefBuffer, tfrt::RCReference<tfrt::BEFFile>> CreateBefFile(
        tfrt::HostContext* host) {
      std::string file_path = GetDataDependencyFilepath(
    - Last Modified: Fri Dec 08 08:08:48 UTC 2023
    - 6.3K bytes
  6. tensorflow/compiler/mlir/tfrt/tests/reconfig_batch_op.mlir

    // CHECK-LABEL: func private @batched_function
    func.func private @batched_function(%arg0: tensor<1x3xf32>) -> tensor<1x3xf32> {
      %2 = "tf.Identity"(%arg0) : (tensor<1x3xf32>) -> tensor<1x3xf32>
      func.return %2 : tensor<1x3xf32>
    }
    
    // CHECK-LABEL: func @main
    func.func @main(%arg0: tensor<1x3xf32>) -> tensor<*xf32> {
      // CHECK:  "tf.BatchFunction"
      // CHECK-SAME: allowed_batch_sizes = [6]
    - Last Modified: Fri May 31 17:38:34 UTC 2024
    - 5.2K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/batch_use_same_function/saved_model.pbtxt

    # Test importing a saved model with 2 signatures that use the same
    # BatchFunction op, which references the same inference_func from the
    # graph_def library. The expected result is that both signatures use the
    # same BatchFunction op (the shared_name is the same) and the same copy of
    # inference_func.
    
    # CHECK: f = @inference_func[[post_fix:[^,]*]]
    - Last Modified: Wed Mar 09 16:20:29 UTC 2022
    - 2.6K bytes
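    The expected shape of the imported MLIR, sketched under assumptions (the signature name, tensor types, and attribute values are invented; only the shared callee and matching shared_name come from the description above):

    // One of two hypothetical signature functions; both batch through the
    // same @inference_func and use the same BatchFunction configuration.
    func.func @signature_1(%arg0: tensor<1xf32>) -> tensor<*xf32> {
      %0 = "tf.BatchFunction"(%arg0) {
          f = @inference_func, shared_name = "batch", num_batch_threads = 1 : i64,
          max_batch_size = 4 : i64, batch_timeout_micros = 1000 : i64,
          operandSegmentSizes = array<i32: 1, 0>
        } : (tensor<1xf32>) -> tensor<*xf32>
      func.return %0 : tensor<*xf32>
    }
    // @signature_2 would be identical apart from its name, referring to the
    // same @inference_func rather than to a duplicated copy.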
  8. tensorflow/compiler/mlir/tensorflow/tests/resource_analyzer.mlir

    // ... assigned inside the function called by "tf.BatchFunction".
    
    module {
    // CHECK-LABEL: @serving_default
      func.func @serving_default() -> (tensor<*xi32>) {
        // expected-remark@below {{device: "", container: "", shared_name: "var_0", is_potentially_written: false}}
        %0 = "tf.VarHandleOp"() {shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xi32>>>
        %1 = "tf.BatchFunction"(%0) {
            f = @called_by_batch_func,
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 2.3K bytes
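    The remark above reports is_potentially_written: false, which holds only if the callee never assigns the handle; a hypothetical body for @called_by_batch_func (assumed, not taken from the file) that keeps the flag false:

    // Hypothetical callee that only reads its resource argument; reads alone
    // do not mark the handle as potentially written.
    func.func private @called_by_batch_func(%arg0: tensor<!tf_type.resource<tensor<2xi32>>>) -> tensor<2xi32> {
      %0 = "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<2xi32>>>) -> tensor<2xi32>
      func.return %0 : tensor<2xi32>
    }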
  9. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/batch_function_deduplicate.mlir

    // ... the functions invoked by BatchFunction with the same shared_name, and the
    // function `compute_2` will not be removed as the shared_name is different.
    
    // CHECK-LABEL: func private @batch_0
    // CHECK: f = @compute_0
    func.func private @batch_0(%arg0: tensor<?x?xi32>) -> tensor<*xi32> {
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 3.5K bytes
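    A hedged sketch of the deduplication described above; apart from @batch_0, @compute_0, and @compute_2, every name and attribute value here is an assumption:

    // Hypothetical sibling wrapper invoking a duplicate callee @compute_1
    // under the same shared_name as @batch_0; dedup can rewrite f to
    // @compute_0 and drop @compute_1, while @compute_2 survives because its
    // BatchFunction uses a different shared_name.
    func.func private @batch_1(%arg0: tensor<?x?xi32>) -> tensor<*xi32> {
      %0 = "tf.BatchFunction"(%arg0) {
          f = @compute_1, shared_name = "same_name", num_batch_threads = 1 : i64,
          max_batch_size = 4 : i64, batch_timeout_micros = 1000 : i64,
          operandSegmentSizes = array<i32: 1, 0>
        } : (tensor<?x?xi32>) -> tensor<*xi32>
      func.return %0 : tensor<*xi32>
    }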
  10. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/force_shared_name_for_resource_ops.pbtxt

    # CHECK: "tf.BatchFunction"
    # CHECK-SAME: shared_name = "batch_node"
    
    # CHECK: func private @create_resource
    # CHECK: tf.HashTableV2
    # CHECK-SAME: shared_name = "hash_table_node@create_resource"
    # CHECK: "tf.Variable"
    # CHECK-SAME: shared_name = "variable_node"
    # CHECK: "tf.VariableV2"
    # CHECK-SAME: shared_name = "variable_v2_node"
    # CHECK: "tf.BatchFunction"
    - Last Modified: Tue Aug 31 02:37:48 UTC 2021
    - 5.5K bytes