Results 1 - 8 of 8 for dequantize (0.17 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    }
    // CHECK-LABEL: uniform_dequantize_op_ui16_storage_input
    // CHECK: stablehlo.uniform_dequantize
    // CHECK-NOT: tfl.dequantize
    
    // -----
    
    // Tests that the pattern doesn't match when the input quantized tensor's
    // storage type is i32. i32 storage type is not compatible with
    // `tfl.dequantize`.
    
    func.func @uniform_dequantize_op_i32_storage_input(%arg: tensor<2x2x!quant.uniform<i32:f32, 1.000000e+0:8>>) -> tensor<2x2xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

      }
    };
    
    // stablehlo.uniform_dequantize -> tfl.dequantize
    class RewriteUniformDequantizeOp
        : public OpRewritePattern<stablehlo::UniformDequantizeOp> {
      using OpRewritePattern<stablehlo::UniformDequantizeOp>::OpRewritePattern;
    
      // Determines whether the input and output types are compatible with
      // `tfl.dequantize`. See the definition for the `DEQUANTIZE` kernel for the
      // detailed limitations
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
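The comment in result 2 gates the stablehlo.uniform_dequantize -> tfl.dequantize rewrite on the input and output types being compatible with `tfl.dequantize`, and the tests in result 1 show that ui16 and i32 storage types are rejected. A minimal Python sketch of such a storage-type gate follows; the accepted set (i8, ui8, and i16 storage with an f32 expressed type) is an assumption for illustration only, and the authoritative limits are those of the TFLite `DEQUANTIZE` kernel referenced in the comment.

    def is_compatible_with_tfl_dequantize(storage_bits: int,
                                          storage_is_signed: bool,
                                          expressed_type: str) -> bool:
        """Sketch of the storage/expressed-type gate described in result 2.

        The accepted combinations are assumptions for illustration; the
        authoritative limitations live in the TFLite DEQUANTIZE kernel.
        """
        accepted_storage = {
            (8, True),    # i8
            (8, False),   # ui8
            (16, True),   # i16
        }
        # ui16 (16, False) and i32 (32, True) are deliberately absent, matching
        # the tests in result 1 where no tfl.dequantize is emitted.
        return ((storage_bits, storage_is_signed) in accepted_storage
                and expressed_type == "f32")

    # Example: i16 storage passes, i32 storage does not.
    print(is_compatible_with_tfl_dequantize(16, True, "f32"))   # True
    print(is_compatible_with_tfl_dequantize(32, True, "f32"))   # False
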
  3. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      func.return %0 : tensor<8x8x8x8xf32>
    
      // CHECK-LABEL: fakeQuantArgsFalse
      // CHECK: "tfl.quantize"(%arg0) <{qtype = tensor<8x8x8x8x!quant.uniform<u8:f32, 0.0011764706057660721:85>>}>
      // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<8x8x8x8x!quant.uniform<u8:f32, 0.0011764706057660721:85>>) -> tensor<8x8x8x8xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
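The CHECK lines in result 3 show a FakeQuant op legalized into a back-to-back "tfl.quantize" / "tfl.dequantize" pair with the type !quant.uniform<u8:f32, 0.0011764706057660721:85>. As a rough Python sketch of the quantize half of that pair (the dequantize half is sketched after result 5 below), the code below applies plain affine quantization with those parameters; note that scale * (0 - 85) is approximately -0.1 and scale * (255 - 85) is approximately 0.2, the representable real range.

    import numpy as np

    # Parameters from result 3: !quant.uniform<u8:f32, 0.0011764706057660721:85>.
    SCALE = 0.0011764706057660721
    ZERO_POINT = 85

    def quantize_u8(x: np.ndarray) -> np.ndarray:
        """Affine quantization to uint8: q = clamp(round(x / scale) + zero_point, 0, 255)."""
        q = np.round(x / SCALE) + ZERO_POINT
        return np.clip(q, 0, 255).astype(np.uint8)

    x = np.array([-0.2, -0.1, 0.0, 0.1, 0.2], dtype=np.float32)
    print(quantize_u8(x))  # -> [0 0 85 170 255]; out-of-range values clamp to 0 or 255
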
  4. tensorflow/compiler/mlir/lite/transforms/optimize.cc

          if (fc_op.getFilter() != filter) {
            // This filter goes through quantize and dequantize ops. Then we just
            // need to update the weight to the quantize op.
            filter.replaceAllUsesWith(new_filter_op);
          } else {
        // This filter doesn't go through quantize and dequantize ops, so
        // we update the weight of the affine op directly.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

    // Quantization ops.
    //===----------------------------------------------------------------------===//
    def TFL_DequantizeOp: TFL_Op<"dequantize", [NoMemoryEffect]> {
      let summary = "Dequantize operator";
    
      let description = [{
    Converts a quantized array of integers to floating-point values according
    to the quantization parameters.
      }];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
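The op description in result 5 corresponds to the standard affine dequantization formula real = scale * (q - zero_point). Below is a minimal NumPy sketch of that arithmetic, using the !quant.uniform<i8:f32, 0.1> type that appears in result 6 below (scale 0.1, zero point 0); this illustrates the math only and is not the TFLite kernel itself.

    import numpy as np

    def dequantize(q: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
        """Affine dequantization: real = scale * (q - zero_point)."""
        return scale * (q.astype(np.int32) - zero_point)

    # Parameters from result 6: !quant.uniform<i8:f32, 0.1> (zero point 0).
    q = np.array([-128, -10, 0, 25, 127], dtype=np.int8)
    print(dequantize(q, scale=0.1, zero_point=0))  # -> [-12.8 -1. 0. 2.5 12.7]
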
  6. tensorflow/compiler/mlir/lite/tests/ops.mlir

    func.func @testDequantize(tensor<? x !quant.uniform<i8:f32, 0.1>>) -> tensor<? x f32> {
    ^bb0(%arg0: tensor<? x !quant.uniform<i8:f32, 0.1>>):
      // CHECK: "tfl.dequantize"(%arg0) : (tensor<?x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<?xf32>
      %0 = "tfl.dequantize"(%arg0): (tensor<? x !quant.uniform<i8:f32, 0.1>>) -> tensor<? x f32>
      func.return %0 : tensor<? x f32>
    }
    
    // CHECK-LABEL: testLogicalNot
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

        ):
          quantize_model.quantize(
              self._input_saved_model_path,
              self._output_saved_model_path,
              quantization_options=quantization_options,
              representative_dataset=representative_dataset,
          )
    
        converted_model = quantize_model.quantize(
            self._input_saved_model_path,
            self._output_saved_model_path,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
  8. src/cmd/internal/obj/s390x/asmz.go

    	op_PTI     uint32 = 0xB99E // FORMAT_RRE        PROGRAM TRANSFER WITH INSTANCE
    	op_PTLB    uint32 = 0xB20D // FORMAT_S          PURGE TLB
    	op_QADTR   uint32 = 0xB3F5 // FORMAT_RRF2       QUANTIZE (long DFP)
    	op_QAXTR   uint32 = 0xB3FD // FORMAT_RRF2       QUANTIZE (extended DFP)
    	op_RCHP    uint32 = 0xB23B // FORMAT_S          RESET CHANNEL PATH
    	op_RISBG   uint32 = 0xEC55 // FORMAT_RIE6       ROTATE THEN INSERT SELECTED BITS
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 16 17:46:09 UTC 2024
    - 176.7K bytes
    - Viewed (0)