Results 101 - 110 of 157 for I8 (0.21 sec)

  1. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

        graph will be converted into the following IR:
    
        %q_w = "tfl.pseudo_qconst"() {
             qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
        %w = "tfl.dequantize"(%q_w) :
             (tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>) ->
             tensor<64x3x3x3xf32>
        %conv = "tfl.conv_2d"(%input_act, %w, %bias)
    
        but if it is supported, it will be rewritten as:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
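
     The qtype in this excerpt, !quant.uniform<i8<-127:127>:f32, 1.000000e+00>, says the weight is
     stored as narrow-range signed 8-bit integers with a float scale of 1.0, and "tfl.dequantize"
     recovers the float view as q * scale. A minimal numeric sketch of that round trip (plain Go
     rather than TFLite code; function names and sample values are illustrative):

        package main

        import (
            "fmt"
            "math"
        )

        // quantize maps a float weight to a narrow-range int8 value: round(w / scale),
        // clamped to [-127, 127] so the integer range stays symmetric around zero.
        func quantize(w, scale float32) int8 {
            q := int32(math.Round(float64(w / scale)))
            if q < -127 {
                q = -127
            }
            if q > 127 {
                q = 127
            }
            return int8(q)
        }

        // dequantize recovers the float approximation, as tfl.dequantize does: q * scale.
        func dequantize(q int8, scale float32) float32 {
            return float32(q) * scale
        }

        func main() {
            const scale = 1.0 // matches the 1.000000e+00 scale in the excerpt
            for _, w := range []float32{-3.2, 0.4, 130.0} {
                q := quantize(w, scale)
                fmt.Printf("w=%6.2f  q=%4d  dequant=%6.2f\n", w, q, dequantize(q, scale))
            }
        }

     The clamp to -127 rather than -128 mirrors the narrow-range <-127:127> annotation, which keeps
     the representable range symmetric around zero.
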
  2. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/type_attr.mlir

        tf_executor.fetch
      }
      func.return
    }
    
    func.func @plain() {
      tf_executor.graph {
        %0:2 = tf_executor.island wraps "tf.Placeholder"() {type = i8} : () -> tensor<16xi8>
        tf_executor.fetch
      }
      func.return
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 1.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

      %0 = arith.constant dense<[[1.0], [2.0]]> : tensor<2x1xf32>
      %1 = "tfl.quantize"(%0) {qtype = tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>} : (tensor<2x1xf32>) -> tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>
      %2 = "tfl.dequantize"(%1) : (tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt

      attr {
        key: "Tshape"
        value {
          type: DT_INT32
        }
      }
    }
    
    # MLIR-LABEL: func @main
    # MLIR-SAME:  (%[[ARG_0:[a-z0-9]+]]: tensor<1x1x1x256x!quant.uniform<i8:f32, 0.21632751372549019:27>>) -> tensor<1x6x31x!quant.uniform<i8:f32, 0.09363494573854933:22>>
    # MLIR-SAME:  control_outputs = ""
    # MLIR-SAME:  inputs = "input"
    # MLIR-SAME:  outputs = "output"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)
  5. src/internal/types/testdata/check/decls0.go

    	}
    	I5 interface {
    		m1(I5)
    	}
    	I6 interface {
    		S0 /* ERROR "non-interface type S0" */
    	}
    	I7 interface {
    		I1
    		I1
    	}
    	I8 /* ERROR "invalid recursive type" */ interface {
    		I8
    	}
    	I9 /* ERROR "invalid recursive type" */ interface {
    		I10
    	}
    	I10 interface {
    		I11
    	}
    	I11 interface {
    		I9
    	}
    
    	C1 chan int
    	C2 <-chan int
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 19:19:55 UTC 2024
    - 4.1K bytes
    - Viewed (0)
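
     The I8 and I9 declarations above exercise the rule that a Go interface may not embed itself,
     either directly or through a cycle (I9 -> I10 -> I11 -> I9), while mentioning itself in a
     method signature remains legal, as I5 does. A small standalone sketch of the distinction (the
     Node interface is an illustrative name, not taken from the test file):

        package main

        // Rejected by the compiler with "invalid recursive type", shown only as comments:
        //
        //     type I8 interface{ I8 }    // direct self-embedding
        //     type I9 interface{ I10 }   // cycle: I9 -> I10 -> I11 -> I9
        //     type I10 interface{ I11 }
        //     type I11 interface{ I9 }

        // Legal: the self-reference appears in a method signature, not as an embedded type.
        type Node interface {
            Next() Node
        }

        func main() {}
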
  6. src/compress/flate/testdata/huffman-rand-1k.in

     (binary random test data; snippet not representable as text)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 11 17:40:52 UTC 2016
    - 1000 bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/optimize.td

        (TF_CastOp:$i8_cast
          (TF_ClipByValueOp:$clip $input, $min_value, $max_value),
          ConstBoolAttrFalse:$truncate2),
        ConstBoolAttrFalse:$truncate1),
      (TF_CastOp $clip, ConstBoolAttrFalse),
      [(TensorOf<[I8]> $i8_cast),
       (TensorOf<[I32]> $clip),
       (IsIntSplatValueEqual<"int32_t", "-128"> $min_value),
       (IsIntSplatValueEqual<"int32_t", "127"> $max_value)]>;
    
    // This pattern optimizes:
    //   (x + cst1) + cst2 -> x + cst
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 2.3K bytes
    - Viewed (0)
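
     The constraints on the first pattern above (an i32 ClipByValue with splat -128 minimum and
     splat 127 maximum, followed by a cast to i8) encode a simple identity: once an int32 value has
     been clipped to [-128, 127], casting it to int8 and back to int32 changes nothing, so the i8
     round trip can be dropped and a single cast kept. A quick standalone check of that identity
     (plain Go, illustrative values):

        package main

        import "fmt"

        // clip mirrors TF_ClipByValueOp on a scalar: clamp x into [lo, hi].
        func clip(x, lo, hi int32) int32 {
            if x < lo {
                return lo
            }
            if x > hi {
                return hi
            }
            return x
        }

        func main() {
            for _, x := range []int32{-300, -5, 200} {
                c := clip(x, -128, 127)
                roundTrip := int32(int8(c)) // the i32 -> i8 -> i32 cast pair from the pattern
                fmt.Println(x, c, roundTrip, c == roundTrip) // roundTrip always equals c
            }
        }
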
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

    //   * Input tensors are per-tensor uniform quantized (i8->f32)
    //     tensors (full integer) with shape [..., r_x, c_x] or [..., c_x, r_x].
    //   * The filter tensor is a per-tensor uniform quantized (i8->f32) tensor
    //     (constant or activation) with shape [..., r_y, c_y] or [..., c_y, r_y].
    //   * Output tensors are per-tensor uniform quantized (i8->f32) or
    //     per-channel uniform quantized (i32->f32) tensors.
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
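
     The comments above describe per-tensor i8 inputs and filters with i8 or i32 outputs. One way
     to see why i32 appears on the output side: products of two i8 operands are accumulated in a
     32-bit integer, and only a final rescale maps the accumulator back to i8 or to float. A rough
     sketch assuming symmetric (zero-point 0) quantization; shapes, scale values, and function
     names are illustrative, not taken from the pass:

        package main

        import "fmt"

        // quantizedDot multiplies two symmetric-quantized i8 vectors, accumulating in int32,
        // and also returns the dequantized (float) value of the accumulator.
        func quantizedDot(x, w []int8, xScale, wScale float32) (int32, float32) {
            var acc int32
            for i := range x {
                acc += int32(x[i]) * int32(w[i]) // i8 * i8 products, accumulated in i32
            }
            return acc, float32(acc) * xScale * wScale
        }

        func main() {
            x := []int8{12, -7, 100}
            w := []int8{3, 5, -2}
            acc, dq := quantizedDot(x, w, 0.05, 0.02)
            fmt.Println(acc, dq) // the i32 accumulator and its float interpretation
        }
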
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

        %20 = "tf.AddV2"(%19, %arg10) : (tensor<i32>, tensor<i32>) -> tensor<i32>
        %21 = "tf.Cast"(%20) : (tensor<i32>) -> tensor<i8>
        %22 = "tf.Cast"(%21) {Truncate = false} : (tensor<i8>) -> tensor<i8>
        %23 = "tf.Cast"(%22) {Truncate = false} : (tensor<i8>) -> tensor<i32>
        %24 = "tf.Maximum"(%cst_0, %arg10) : (tensor<i32>, tensor<i32>) -> tensor<i32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %cst = arith.constant dense<1.0> : tensor<5x2xf32>
      %q = "tfl.quantize"(%cst) {qtype = tensor<5x2x!quant.uniform<i8<-127:127>:f32, 1.113490e-03>>} : (tensor<5x2xf32>) -> tensor<5x2x!quant.uniform<i8<-127:127>:f32, 1.113490e-03>>
      %dq = "tfl.dequantize"(%q) : (tensor<5x2x!quant.uniform<i8<-127:127>:f32, 1.113490e-03>>) -> tensor<5x2xf32>
      %t = "tfl.transpose"(%dq, %perm) : (tensor<5x2xf32>, tensor<2xi32>) -> tensor<2x5xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)