Results 1 - 10 of 12 for 256x1xf32 (0.19 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/unfuse_mhlo_batch_norm.mlir

    func.func @batchNormInference_2D_inner_features(
        %x: tensor<4x256xf32>, %scale: tensor<256xf32>, %offset: tensor<256xf32>,
        %mean: tensor<256xf32>, %variance: tensor<256xf32>)
        -> (tensor<4x256xf32>) {
      // CHECK-DAG: %[[EPS_BCAST:.+]] = mhlo.constant dense<1.001000e-05> : tensor<256xf32>
      // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<256xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 10.4K bytes
    - Viewed (0)
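
    The CHECK-DAG lines in this result verify the epsilon constant and the variance-plus-epsilon add that unfusing emits. As a minimal sketch, assuming the standard batch-norm inference formula (the broadcast ops the pass actually emits may differ), the per-feature arithmetic is:

      // Sketch only, not the literal pass output.
      // y = (x - mean) * scale / sqrt(variance + eps) + offset
      %eps     = mhlo.constant dense<1.001000e-05> : tensor<256xf32>
      %var_eps = mhlo.add %variance, %eps : tensor<256xf32>
      %rsqrt   = mhlo.rsqrt %var_eps : tensor<256xf32>
      %mult    = mhlo.multiply %scale, %rsqrt : tensor<256xf32>
      // %mult and the matching bias term are then broadcast along the feature
      // dimension (dim 1) to tensor<4x256xf32> and applied to %x.
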
  2. tensorflow/compiler/mlir/lite/experimental/tac/README.md

     func private @func_2_CPU_FLOAT(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} {
        %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, tac.device = "CPU", tac.inference_type = "FLOAT", values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        return %0 : tensor<2x1xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 29 18:32:13 UTC 2022
    - 11.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

      %2 = "tf.Reshape"(%0, %cst_0) : (tensor<1x2xf32>, tensor<2xi64>) -> tensor<2x1xf32>
      func.return %1, %2 : tensor<2x1xf32>, tensor<2x1xf32>
    
    // CHECK:  %cst = arith.constant
    // CHECK:  %[[FQ:.*]] = "tf.FakeQuantWithMinMaxVars"(%arg0, %arg1, %arg2)
    // CHECK:  %[[R1:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    // CHECK-SAME: tensor<2x1xf32>
    // CHECK:  %[[R2:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

      %2 = "tf.Reshape"(%0, %cst_0) : (tensor<1x2xf32>, tensor<2xi64>) -> tensor<2x1xf32>
      func.return %1, %2 : tensor<2x1xf32>, tensor<2x1xf32>
    
    // CHECK:  %cst = arith.constant
    // CHECK:  %[[FQ:.*]] = "tf.FakeQuantWithMinMaxVars"(%arg0, %arg1, %arg2)
    // CHECK:  %[[R1:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    // CHECK-SAME: tensor<2x1xf32>
    // CHECK:  %[[R2:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 22K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    func.func @add_with_activation_transpose_rank_two(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
      %0 = stablehlo.constant dense<2.000000e+00> : tensor<2x1xf32>
      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
      %2 = stablehlo.add %1, %0 : tensor<2x1xf32>
      return %2 : tensor<2x1xf32>
    }
    // CHECK: %[[TRANSPOSE_0:.+]] = stablehlo.transpose
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)
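
    Based on the pass name and the stablehlo.transpose CHECK above, a hedged sketch of the expected rewrite (the constant's 1x2 layout is an assumption): the add runs in the original layout and the transpose is deferred to the result.

      // Hypothetical post-pass form, a sketch rather than the verbatim expected output.
      func.func @add_with_activation_transpose_rank_two(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
        %cst = stablehlo.constant dense<2.000000e+00> : tensor<1x2xf32>
        %0 = stablehlo.add %arg0, %cst : tensor<1x2xf32>
        %1 = stablehlo.transpose %0, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
        return %1 : tensor<2x1xf32>
      }
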
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/duplicate_shape_determining_constants.mlir

      // time constant.
      %0 = "tf.ConcatV2"(%arg0, %arg0, %arg0, %arg0, %axis) : (tensor<16x1xf32>, tensor<16x1xf32>, tensor<16x1xf32>, tensor<16x1xf32>, tensor<i32>) -> tensor<16x4xf32>
    
      // Just to introduce an extra use for %cst.
      %1 = "tf.AddV2"(%axis, %axis) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<i32>
    
      return %0 : tensor<16x4xf32>
    }
    // Check that the constant is cloned with same value.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 24 07:44:46 UTC 2022
    - 11K bytes
    - Viewed (0)
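
    A hedged sketch of the cloning the comment describes (the dense<1> axis value is a placeholder, not taken from the file): tf.ConcatV2 gets its own copy of the shape-determining axis constant, while the original %axis still feeds tf.AddV2.

      // Sketch with an assumed axis value; only the duplication pattern matters.
      %axis_copy = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
      %0 = "tf.ConcatV2"(%arg0, %arg0, %arg0, %arg0, %axis_copy) : (tensor<16x1xf32>, tensor<16x1xf32>, tensor<16x1xf32>, tensor<16x1xf32>, tensor<i32>) -> tensor<16x4xf32>
      %1 = "tf.AddV2"(%axis, %axis) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<i32>
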
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    func.func @pack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %0 : tensor<2x1xf32>
    }
    
    // CHECK:   func @pack(%[[VAL_0:.*]]: tensor<1xf32>, %[[VAL_1:.*]]: tensor<1xf32>) -> tensor<2x1xf32> {
    // CHECK-DAG:       %[[VAL_2:.*]] = "tfl.pseudo_const"{{.*}}dense<1> : tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
    - Viewed (0)
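
    The dense<1> : tensor<4xi32> pseudo-constant in the CHECK-DAG suggests the GPU transform lowers tfl.pack into reshapes plus a concatenation. A hedged sketch of that kind of lowering (the attributes and the trailing reshape are assumptions):

      // Assumed lowering sketch: reshape each input to rank 4, concatenate along
      // axis 0, then reshape the result back to tensor<2x1xf32>.
      %shape4 = "tfl.pseudo_const"() {value = dense<1> : tensor<4xi32>} : () -> tensor<4xi32>
      %r0 = "tfl.reshape"(%arg0, %shape4) : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
      %r1 = "tfl.reshape"(%arg1, %shape4) : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
      %c  = "tfl.concatenation"(%r0, %r1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<2x1x1x1xf32>
      // ... followed by a reshape of %c to tensor<2x1xf32>.
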
  8. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        %1:2 = "tf.IteratorGetNext"(%arg4) {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : (tensor<*x!tf_type.resource>) -> (tensor<2x224x224x3xf32>, tensor<2x1xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

        %2 = func.call @func_2_CPU_FLOAT(%0, %1) {tac.interface_name = "func_2"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        func.return %2 : tensor<2x1xf32>
      }
    
      func.func private @func_2_CPU_FLOAT(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
      %cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
      %w = "tf.AddV2"(%cst, %cst_1) : (tensor<48x48x3x1xf32>, tensor<256xf32>) -> tensor<48x48x3x256xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
    - Viewed (0)