Results 1 - 10 of 121 for func_0 (0.14 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        %2 = func.call @func_2_CPU_FLOAT(%0, %1) {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} : (tensor<100xf32>, tensor<100xf32>) -> tensor<2x100xf32>
        func.return %2 : tensor<2x100xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir

    // -----
    
    func.func @pack_CPU(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<2x100xf32> attributes {tac.device = "CPU", tac.interface_name = "func_2"} {
      // CHECK: tac.cost = 1.000000e+02
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, tac.device = "CPU", values_count = 2 : i32} : (tensor<100xf32>, tensor<100xf32>) -> tensor<2x100xf32>
      func.return %0 : tensor<2x100xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 5.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/compute-cost.mlir

    func.func @func_0_GPU(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<256x32x32x3xf32>) -> tensor<256x32x32x3xf32> attributes {tac.device = "GPU", tac.interface_name = "func_0"} {
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU", tac.device = "GPU"} : (tensor<256x32x32x3xf32>, tensor<256x32x32x3xf32>) -> tensor<256x32x32x3xf32>
      func.return %0 : tensor<256x32x32x3xf32>
    }
    
    // -----
    
    // CHECK: tac.cost = 1.000000e+03
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 4.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      func.return %0 : tensor<i32>
    }
    }
    
    // CHECK:     func.func @simpleWhile(%arg0: tensor<i32>) -> tensor<i32> {
    // CHECK:       %0 = call @func_0_CPU_FLOAT(%arg0) {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_0"} : (tensor<i32>) -> tensor<i32>
    // CHECK:       return %0 : tensor<i32>
    // CHECK:     }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    module {
      func.func @simpleTest(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> tensor<2x1xf32> {
        %0 = func.call @func_0_GPU_FLOAT(%arg0, %arg1, %arg2) {tac.interface_name = "func_0"} : (tensor<1xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
        %1 = func.call @func_1_GPU_FLOAT(%arg0, %arg3) {tac.interface_name = "func_1"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/README.md

        %2 = call @func_2_CPU_FLOAT(%0, %1) {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        return %2 : tensor<2x1xf32>
      }
    ```
    
Why do we need to raise those ops into `FuncOps`? Please see the following section.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 29 18:32:13 UTC 2022
    - 11.6K bytes
    - Viewed (0)
  7. test/closure3.dir/main.go

    			b := 3
    			return func(y int) int { // ERROR "can inline main.func27.1" "can inline main.main.func27.func34"
    				c := 5
    				return func(z int) int { // ERROR "can inline main.func27.1.1" "can inline main.main.func27.func34.1" "can inline main.func27.main.func27.1.2" "can inline main.main.func27.main.main.func27.func34.func36"
    					return a*x + b*y + c*z
    				}(10) // ERROR "inlining call to main.func27.1.1"
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Aug 17 19:36:29 UTC 2023
    - 8.3K bytes
    - Viewed (0)
  8. tensorflow/c/c_api_function_test.cc

      if (GetName(funcs[0]) == GetName(func0)) {
        AssertEqual(func0, funcs[0]);
        AssertEqual(func1, funcs[1]);
      } else {
        AssertEqual(func0, funcs[1]);
        AssertEqual(func1, funcs[0]);
      }
    
      TF_DeleteFunction(funcs[0]);
      TF_DeleteFunction(funcs[1]);
    
      TF_DeleteFunction(func0);
      TF_DeleteFunction(func1);
    }
    
    }  // namespace
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 20 22:08:54 UTC 2023
    - 63.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc

    // This pass is used to fold tfl.const ops to each subgraph (func::FuncOp):
    // See the example below:
    //
    // In main:
    // %0 = tfl.const...
    // %1 = tfl.const...
    // %2 = call func_1(..., %0,...)
    // %3 = call func_2(..., %0, ..., %1...)
    // ...
    //
    // Then those consts will be copied into each function and replace their usage.
    // func_1:
    //   %0 = tfl.const...
    // func_2:
    //   %0 = tfl.const...
    //   %1 = tfl.const...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.1K bytes
    - Viewed (0)
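The comment excerpt above describes how the pass copies `tfl.const` ops from the main function into each callee subgraph and rewires the callee to use the local copy. As a rough illustration of that folding step, here is a minimal C++ sketch against standard upstream MLIR APIs; it is not the actual TensorFlow pass, and `FoldConstOperandsIntoCallee` plus the `ConstantLike` predicate are stand-ins chosen for this sketch.

```cpp
#include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"

using namespace mlir;

// Hypothetical helper: clone every constant-like operand of `call` into the
// callee and make the callee use the local clone instead of the argument.
static void FoldConstOperandsIntoCallee(func::CallOp call, ModuleOp module) {
  auto callee = module.lookupSymbol<func::FuncOp>(call.getCallee());
  if (!callee || callee.isExternal()) return;

  // Insert the clones at the top of the callee's entry block.
  OpBuilder builder = OpBuilder::atBlockBegin(&callee.getBody().front());
  for (const auto &en : llvm::enumerate(call.getArgOperands())) {
    Operation *def = en.value().getDefiningOp();
    // The real pass matches tfl.const ops; ConstantLike is a generic
    // stand-in predicate used only in this sketch.
    if (!def || !def->hasTrait<OpTrait::ConstantLike>()) continue;
    Operation *clone = builder.clone(*def);
    // Rewire uses of the corresponding block argument to the local copy.
    callee.getArgument(en.index()).replaceAllUsesWith(clone->getResult(0));
  }
}
```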
  10. tensorflow/compiler/mlir/quantization/common/func.cc

    // Returns true iff the function's symbol is public.
    bool IsPublicFuncOp(func::FuncOp func_op) {
      return SymbolTable::getSymbolVisibility(&*func_op) ==
             SymbolTable::Visibility::Public;
    }
    
    }  // namespace
    
    func::FuncOp FindMainFuncOp(ModuleOp module_op) {
      if (const auto main_func_op = module_op.lookupSymbol<func::FuncOp>(
              kImportModelDefaultGraphFuncName);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Feb 19 06:55:11 UTC 2024
    - 2K bytes
    - Viewed (0)
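The excerpt above decides whether a function is public by inspecting its symbol visibility. The following standalone sketch shows the same check applied across a module, assuming only standard upstream MLIR APIs; `PublicFuncNames` is a hypothetical helper written for this illustration, not part of `func.cc`.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"

using namespace mlir;

// Collects the names of all functions in the module whose symbol
// visibility is Public (the same condition used by IsPublicFuncOp above).
static llvm::SmallVector<llvm::StringRef> PublicFuncNames(ModuleOp module_op) {
  llvm::SmallVector<llvm::StringRef> names;
  for (func::FuncOp func_op : module_op.getOps<func::FuncOp>()) {
    if (SymbolTable::getSymbolVisibility(func_op.getOperation()) ==
        SymbolTable::Visibility::Public) {
      names.push_back(func_op.getSymName());
    }
  }
  return names;
}
```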