Results 11 - 20 of 43 for 1xf32 (0.06 sec)
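
For context (not taken from any of the files below): `1xf32` is MLIR shape-and-element-type syntax for a rank-1 value with a single 32-bit float element, as in `tensor<1xf32>` or `vector<1xf32>`. A minimal illustrative constant of that type:

```mlir
// Illustrative only: a rank-1 tensor holding one f32 element,
// the type the query "1xf32" matches in the results below.
%cst = arith.constant dense<4.200000e+00> : tensor<1xf32>
```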

  1. tensorflow/compiler/mlir/tfr/ir/tfr_ops.td

        tensor type, the shape shouldn't be changed during the conversion.
    
        Example:
    
        ```mlir
        %1 = tfr.constant_tensor(%0) : f32 -> tensor<f32>
        %3 = tfr.constant_tensor(%2) : vector<1xf32> -> tensor<1xf32>
        ```
      }];
    
      let arguments = (ins TFR_AllAttrTypes:$arg);
    
      let results = (outs TFR_singleTensorType:$out);
    
      let hasCanonicalizer = 1;
    
      let hasVerifier = 1;
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 10:54:29 UTC 2024
    - 17.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %0 = "quantfork.stats"(%arg0) {
        layerStats = dense<[-1.28e-5, 1.27e-5]> : tensor<2xf32>
      } : (tensor<1x5x5x2xf32>) -> tensor<1x5x5x2xf32>
      %w = arith.constant dense<[[[[-1.0, 1.0]]], [[[1.0, 2.0]]], [[[-2.0, 1.0]]]]> : tensor<3x1x1x2xf32>
      %b = arith.constant dense<0.0> : tensor<3xf32>
      %b2 = arith.constant dense<[1.0e-2, 2.1473647e1, -2.1473647e2]> : tensor<3xf32>
      %conv = "tfl.conv_2d"(%0, %w, %b) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

    // CHECK-NEXT:  %[[quant:.*]] = "tfl.quantize"(%[[split]]#0) <{qtype = tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>}> {volatile} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>
    // CHECK-NEXT:  return %[[quant]] : tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

      %min = arith.constant dense<0.0> : tensor<16xf32>
      %max = arith.constant dense<15.0> : tensor<16xf32>
      %mini = "tf.Identity"(%min) : (tensor<16xf32>) -> tensor<16xf32>
      %maxi = "tf.Identity"(%max) : (tensor<16xf32>) -> tensor<16xf32>
      %fq = "tf.FakeQuantWithMinMaxVarsPerChannel"(%in, %mini, %maxi) {num_bits = 3, narrow_range = false} : (tensor<3x3x3x16xf32>, tensor<16xf32>, tensor<16xf32>) -> tensor<3x3x3x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 22K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

      %min = arith.constant dense<0.0> : tensor<16xf32>
      %max = arith.constant dense<255.0> : tensor<16xf32>
      %mini = "tf.Identity"(%min) : (tensor<16xf32>) -> tensor<16xf32>
      %maxi = "tf.Identity"(%max) : (tensor<16xf32>) -> tensor<16xf32>
      %fq = "tf.FakeQuantWithMinMaxVarsPerChannel"(%in, %mini, %maxi) {num_bits = 5, narrow_range = false} : (tensor<3x3x3x16xf32>, tensor<16xf32>, tensor<16xf32>) -> tensor<3x3x3x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.mlir

    "TANH", input_to_cell_intermediate = tensor<0xf32>, input_to_forget_intermediate = tensor<0xf32>, input_to_input_intermediate = tensor<0xf32>, input_to_output_intermediate = tensor<0xf32>, proj_clip = 0.000000e+00 : f32, time_major = false} : (tensor<?x?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>,...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      } : (tensor<2x!tf_type.qint8>, tensor<f32>, tensor<i32>) -> tensor<2xf32>
      func.return %1 : tensor<2xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @uniform_quantize_and_dequantize_per_axis
    func.func @uniform_quantize_and_dequantize_per_axis(%arg0 : tensor<2x2xf32>) -> tensor<2x2xf32> {
      %scales = "tf.Const"() { value = dense<[1.0, 2.0]> : tensor<2xf32> } : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

        %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
        %5 = "tf.Relu6"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
        func.return %2, %5 : tensor<*xf32>, tensor<*xf32>
      }
    
      func.func @float_conv_1(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> (tensor<*xf32>, tensor<*xf32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      %12 = mhlo.clamp %9, %11, %1 : tensor<2xf32>
      %13 = mhlo.multiply %arg0, %12 : tensor<2xf32>
      %14 = mhlo.divide %13, %1 : tensor<2xf32>
      return %14 : tensor<2xf32>
    }
    
    // CHECK-LABEL:   func.func @hardswish(
    // CHECK-SAME:                         %[[VAL_0:.*]]: tensor<2xf32>) -> tensor<*xf32> {
    // CHECK:           %[[VAL_1:.*]] = "tfl.hard_swish"(%[[VAL_0]]) : (tensor<2xf32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // MinElement-LABEL: QuantizeCustomOp
    func.func @QuantizeCustomOp(%arg0: tensor<1x1x1x1xf32>) -> (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) attributes {tf.entry_function = {inputs = "input", outputs = "custom_op"}} {
      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[0.000000e+00, 2.550000e+02]> : tensor<2xf32>} : (tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
      %w_1 = arith.constant dense<127.0> : tensor<4096x1x1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
    - Viewed (0)