Results 1 - 10 of 26 for 1x5x3xf32 (0.34 sec)

  1. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul.pbtxt

    # CHECK:           %[[VAL_9:.*]] = "tfl.transpose"(%[[VAL_1]], %[[VAL_2]]) : (tensor<3x7xf32>, tensor<2xi32>) -> tensor<7x3xf32>
    # CHECK:           %[[VAL_10:.*]] = "tfl.fully_connected"(%[[VAL_7]]#0, %[[VAL_9]], %[[VAL_3]]) <{fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"}> : (tensor<1x5x3xf32>, tensor<7x3xf32>, none) -> tensor<5x7xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2.6K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %14 = stablehlo.multiply %12, %13 : tensor<1x4x3xf32>  // s1 * s2
        %15 = call @uniform_quantize_1(%14, %5, %6) : (tensor<1x4x3xf32>, tensor<1x1x1xf32>, tensor<1x1x1xi8>) -> tensor<1x4x3xi8>
        %16 = call @uniform_dequantize_0(%15, %5, %6) : (tensor<1x4x3xi8>, tensor<1x1x1xf32>, tensor<1x1x1xi8>) -> tensor<1x4x3xf32>
        return %16 : tensor<1x4x3xf32>
      }
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    func.func @QuantizeReshapeOp(%arg0: tensor<1x1x3xf32>) -> (tensor<1x3xf32>) {
      %1 = "quantfork.stats"(%arg0) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x1x3xf32>) -> tensor<1x1x3xf32>
      %2 = "tfl.pseudo_const"() {value = dense<[-1, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
      %3 = "tfl.reshape"(%1, %2) : (tensor<1x1x3xf32>, tensor<2xi32>) -> tensor<1x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  4. tensorflow/compiler/mlir/lite/tests/quantize-variables.mlir

      %50 = "tfl.read_variable"(%5) : (tensor<!tf_type.resource>) -> tensor<1x2x3xf32>
      %51 = "quantfork.stats"(%50) {layerStats = dense<[0.0, 1.0]> : tensor<2xf32>} : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
      %52 = "tfl.concatenation"(%51, %0) {axis = 1 : i32, fused_activation_function = "NONE"} : (tensor<1x2x3xf32>, tensor<1x2x3xf32>) -> tensor<1x4x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.3K bytes
  5. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      const OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
      ASSERT_TRUE(module_op);
    
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
  6. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

          time_major = false} : (
            tensor<1x2x3xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
            none, none, none,
            tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>,
            none, none,
            tensor<1x3xf32>, tensor<1x3xf32>,
            none, none, none, none) -> tensor<1x2x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir

        %output_0, %min_1, %max_2, %histogram_3 = "tf.CustomAggregator"(%0) <{calibration_method = 1 : i32, id = "keeping_id", max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32, num_bins = 0 : i32}> : (tensor<10x1x3xf32>) -> (tensor<10x1x3xf32>, tensor<f32>, tensor<f32>, tensor<0xi64>)
        return %output_0 : tensor<10x1x3xf32>
      }
      // CHECK-LABEL: @main
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 6.3K bytes
  8. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    // CHECK: %[[RESULT:.*]] = "tfl.reshape"(%arg0, %[[CONST:.*]]) : (tensor<?x1x8x3xf32>, tensor<3xi32>) -> tensor<?x8x3xf32>
    // CHECK:  return %[[RESULT]]
    }
    
    func.func @ConvertSqueezeToReshapeWithDynamicDimension2(%arg0: tensor<?x1x8x3xf32>) -> tensor<1x8x3xf32> {
      %0 = "tfl.squeeze"(%arg0) {squeeze_dims = [0]}: (tensor<?x1x8x3xf32>) -> tensor<1x8x3xf32>
      func.return %0: tensor<1x8x3xf32>
    
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  9. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt

      producer: 175
    }
    
    # CHECK: func @main(%[[VAL_0:.*]]: tensor<2x5x3xf32>, %[[VAL_1:.*]]: tensor<3x7xf32>) -> tensor<2x5x7xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "Placeholder,Placeholder_1", outputs = "MatMul"}} {
    # CHECK:     %[[VAL_2:.*]] = "tfl.batch_matmul"(%[[VAL_0]], %[[VAL_1]]) <{adj_x = false, adj_y = false}> : (tensor<2x5x3xf32>, tensor<3x7xf32>) -> tensor<2x5x7xf32>
    # CHECK:     return %[[VAL_2]] : tensor<2x5x7xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.5K bytes
  10. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      %0:3 = "tf.Split"(%arg0, %arg1) : (tensor<i32>, tensor<1x4x3x3xf32>) -> (tensor<1x4x3xf32>, tensor<1x4x3xf32>, tensor<1x4x3xf32>)
      func.return %0#0 : tensor<1x4x3xf32>
    
      // CHECK-LABEL: split
      // CHECK: "tfl.split"(%arg0, %arg1) <{num_splits = 3 : i32}> : (tensor<i32>, tensor<1x4x3x3xf32>) -> (tensor<1x4x3xf32>, tensor<1x4x3xf32>, tensor<1x4x3xf32>)
    }
    
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes