Results 1 - 10 of 21 for Quantized (0.15 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

    // RUN: odml-to-stablehlo-opt --compose-uniform-quantized-type \
    // RUN:     --split-input-file --verify-diagnostics %s | FileCheck %s
    
    module {
    // CHECK-LABEL: quantized_conv_op
    // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32>
      func.func @quantized_conv_op(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
        %1 = stablehlo.constant dense<1.000000e+03> : tensor<1x1x1x1xf32>  // Input inverse scale.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

    }
    
    def QuantizeCompositeFunctionsPass : Pass<"stablehlo-quantize-composite-functions", "ModuleOp"> {
      let summary = "Quantize composite functions with QDQ inputs / outputs.";
      let options = [
        Option<"enable_per_channel_quantized_weight_",
            "enable-per-channel-quantized-weight",
            "bool", /*default=*/"true",
            "Whether to enable per-channel quantized weights.">,
        Option<"mlir_dump_file_name_", "mlir-dump-file-name",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

        // previous quantized layer (note that this behavior differs from
        // DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model
        // has a DumpTensor, and it is used to save the entire value of the
        // outputs from both the quantized and unquantized layers.
        DEBUGGER_TYPE_INT_PER_LAYER = 2;
        // DEBUGGER_TYPE_FLOAT_PER_LAYER creates a debugging model with both
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
    - Viewed (0)
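
    The enum above selects which debugging model is generated. A quick sketch of picking it from Python, assuming the generated proto module below and the DebuggerConfig message name (only the enum value itself appears in the snippet):

    # Assumed import path for the generated bindings of
    # quantization_config.proto; verify against the build.
    from tensorflow.compiler.mlir.quantization.stablehlo import (
        quantization_config_pb2,
    )

    # Request a DumpTensor per layer that saves quantized and unquantized
    # outputs for offline comparison, per the comment above.
    debugger_config = quantization_config_pb2.DebuggerConfig(
        debugger_type=(
            quantization_config_pb2.DebuggerConfig.DEBUGGER_TYPE_INT_PER_LAYER
        ),
    )
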
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

              GetAsVector(expected_tensor->shape()));
    }
    
    // Finds the match of the quantized tensor from the possible tensors. Each
    // possible tensor can be used only once. It checks shape and name if the
    // tensor is quantized, and also checks buffer contents and tensor type if
    // not quantized. For the quantized case, tensor type and quantization
    // params are expected to be checked in the test body with the match.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
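
    The comment above outlines a one-to-one matching scheme: each candidate may be consumed at most once, quantized tensors match on shape and name, and unquantized tensors additionally on buffer contents and tensor type. A rough Python rendering of that rule (the tensor attributes are hypothetical stand-ins for the flatbuffer accessors used in the test):

    def find_match(target, candidates, used_indices):
      """Returns the index of the first unused candidate matching target."""
      for i, cand in enumerate(candidates):
        if i in used_indices:
          continue  # each possible tensor can be used only once
        if cand.shape != target.shape or cand.name != target.name:
          continue
        if not target.is_quantized:
          # Unquantized case: buffer contents and tensor type must match too.
          if cand.type != target.type or cand.buffer != target.buffer:
            continue
        # Quantized case: tensor type and quantization params are left for
        # the test body to verify against the returned match.
        used_indices.add(i)
        return i
      return None
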
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc

      // unquantized tensors are only inserted in the unquantized model
      // whereas `DumpTensor` ops for the quantized tensors are only inserted
      // in the quantized model. Both models are required to be able to dump
      // both quantized and unquantized tensors and compare them offline.
      if (quantization_options.has_debugger_config() &&
          quantization_options.debugger_config().debugger_type() ==
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 23.8K bytes
    - Viewed (0)
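
    As the comment above notes, `DumpTensor` ops for unquantized tensors live only in the unquantized model and those for quantized tensors only in the quantized model, so comparing them is an offline join over two dump directories. A hedged sketch of such a comparison (the one-.npy-file-per-tensor layout is invented for illustration, not the actual DumpTensor output format):

    import os

    import numpy as np

    def compare_dumps(unquantized_dir: str, quantized_dir: str) -> dict:
      """Per-layer mean squared error between the two models' dumps."""
      errors = {}
      for fname in os.listdir(unquantized_dir):
        quantized_path = os.path.join(quantized_dir, fname)
        if not os.path.exists(quantized_path):
          continue  # tensor was dumped by only one of the two models
        baseline = np.load(os.path.join(unquantized_dir, fname))
        quantized = np.load(quantized_path)
        errors[fname] = float(np.mean((baseline - quantized) ** 2))
      return errors
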
  6. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

          != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
      ):
        raise ValueError(
            'StableHLO quantized opset currently only supports static range'
        ' quantization and weight-only quantization via TF Quantizer.'
        )
    
      # Set `force_graph_mode_calibration` to True to avoid skipping the
      # execution of ops that are not connected to return ops during
      # calibration.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
    - Viewed (0)
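
    The ValueError above gates the StableHLO opset on the chosen preset method. Condensed into a standalone guard (the stub enum below is an assumption; only the weight-only preset name is visible in the snippet):

    import enum

    class _PresetMethod(enum.Enum):  # stub; member names are assumed
      METHOD_STATIC_RANGE_INT8 = 1
      METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 = 2
      METHOD_DYNAMIC_RANGE_INT8 = 3

    def check_stablehlo_opset_support(method: _PresetMethod) -> None:
      # Mirror of the check shown above: anything other than static-range
      # or weight-only quantization is rejected for the StableHLO opset.
      supported = (
          _PresetMethod.METHOD_STATIC_RANGE_INT8,
          _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8,
      )
      if method not in supported:
        raise ValueError(
            'StableHLO quantized opset currently only supports static range'
            ' quantization and weight-only quantization via TF Quantizer.'
        )
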
  7. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

      // The following two passes find specific uniform quantization patterns in
      // StableHLO and convert them to TFLite ops that accept or produce uniform
      // quantized types. They only target a specific set of models that contain
      // "decomposed" quantized ops produced from the framework level. This is why
      // they are placed right after the `LegalizeTFXlaCallModuleToStablehloPass`
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc

        return success();
      }
    };
    
    // UniformDequantizeOp takes TF quantized types as input, which would have
    // been converted to mhlo quantized types. Use OpConversionPattern in order
    // to retrieve the operand type *after* conversion, using the OpAdaptor
    // operand accessors.
    // The same applies to other Uniform Quant ops that take TF quantized types
    // as input.
    class ConvertUniformDequantizeOp
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 30.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

            per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
      }
    
      auto quantize = builder.create<quantfork::QuantizeCastOp>(
          q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
      auto dequantize = builder.create<quantfork::DequantizeCastOp>(
          dq_op.getLoc(), new_value_type, quantize.getResult());
      return dequantize.getResult();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

      """Base test class for StableHLO quant tests."""
    
      def setUp(self) -> None:
        super().setUp()
    
        # Many test cases for quantization involve creating and saving the input
        # model and saving the output quantized model. These two member
        # attributes can be used to specify the paths for such models,
        # respectively. These paths will be cleaned up after each test case.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
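
    The setup described above boils down to allocating per-test directories for the input model and the quantized output model. A minimal sketch using tf.test.TestCase's absltest-backed create_tempdir(), which is cleaned up automatically after each test (the attribute names here are hypothetical):

    import tensorflow as tf

    class QuantizedModelTestBase(tf.test.TestCase):
      """Base test class for StableHLO quant tests (sketch)."""

      def setUp(self) -> None:
        super().setUp()
        # Paths for the input SavedModel and the quantized output model;
        # create_tempdir() registers cleanup with the test fixture.
        self._input_saved_model_path = self.create_tempdir('input').full_path
        self._output_saved_model_path = self.create_tempdir('output').full_path
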