Results 91 - 100 of 306 for Quantized (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_weight_only.mlir

    // RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' | FileCheck %s
    
    // Empty module
    module {
      func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> {
        func.return %arg0 : tensor<*xf32>
      }
    }
    
    // CHECK-NOT: func private @internal_dequantize_f32
    // CHECK-NOT: func private @internal_conv3d_fn
    // CHECK-NOT: func private @internal_batch_matmul_fn
    - Last Modified: Thu Feb 16 03:34:36 UTC 2023
    - 843 bytes
  2. tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

    include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
    
    // Quantize attribute $0 by using quantization parameter from %1.
    def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">;
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 2.3K bytes
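    The `NativeCodeCall` above escapes to C++ for the actual attribute
    rewrite. As a rough illustration of the "squash" step, here is the same
    idea sketched as a hand-written C++ rewrite pattern rather than the
    declarative TableGen form; the accessor names (`getInput`) are assumptions
    based on current TFL op definitions, not a copy of the real pattern:

    #include "mlir/IR/PatternMatch.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    namespace {
    // Rewrites tfl.quantize(tfl.dequantize(%q)) -> %q when the outer quantize
    // recreates exactly the quantized type that %q already carries.
    struct SquashDequantizeQuantize
        : public mlir::OpRewritePattern<mlir::TFL::QuantizeOp> {
      using OpRewritePattern::OpRewritePattern;

      mlir::LogicalResult matchAndRewrite(
          mlir::TFL::QuantizeOp quantize_op,
          mlir::PatternRewriter& rewriter) const override {
        auto dequantize_op =
            quantize_op.getInput().getDefiningOp<mlir::TFL::DequantizeOp>();
        if (!dequantize_op) return mlir::failure();
        // Only squash when the round trip is type-preserving.
        if (dequantize_op.getInput().getType() != quantize_op.getType())
          return mlir::failure();
        rewriter.replaceOp(quantize_op, dequantize_op.getInput());
        return mlir::success();
      }
    };
    }  // namespace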
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc

              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops"))};
    
      Option<QuantMethod> quantization_method_{
          *this, "quantization-method",
          llvm::cl::init(tensorflow::quantization::QuantizationMethod::
                             METHOD_STATIC_RANGE_INT8),
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.4K bytes
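    The truncated lines declare MLIR pass options on top of LLVM's
    command-line library. A minimal, self-contained sketch of the same
    `clEnumValN` machinery, using a stand-in enum rather than the real
    `tensorflow::quantization::QuantizationMethod` proto enum:

    #include "llvm/Support/CommandLine.h"

    enum class OpSet { TF, XLA, UNIFORM_QUANTIZED };

    // Stand-in flag mirroring the snippet's clEnumValN value list.
    static llvm::cl::opt<OpSet> target_opset(
        "target-opset", llvm::cl::desc("Choose the target opset."),
        llvm::cl::init(OpSet::TF),
        llvm::cl::values(
            clEnumValN(OpSet::TF, "TF", "Uses TF ops"),
            clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
            clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                       "Uses TF Uniform Quantized ops")));

    int main(int argc, char** argv) {
      llvm::cl::ParseCommandLineOptions(argc, argv);
      return 0;
    }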
  4. tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.cc

        "tf-custom-opdefs", llvm::cl::desc("List of custom opdefs when importing "
                                           "graphdef"));
    
    // A pair of Quantize and Dequantize ops can optionally be emitted before
    // and after the quantized model as adaptors that let it receive and
    // produce floating-point data. Set this to `false` if the model inputs
    // are integer types.
    // NOLINTNEXTLINE
    opt<bool> emit_quant_adaptor_ops(
    - Last Modified: Tue Mar 05 20:53:17 UTC 2024
    - 7.9K bytes
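    The declaration is cut off by the excerpt; a hypothetical completion in
    the same style (the flag name, default, and description below are guesses
    for illustration, not the real file's text):

    // NOLINTNEXTLINE
    opt<bool> emit_quant_adaptor_ops(
        "emit-quant-adaptor-ops",  // hypothetical flag name
        llvm::cl::desc("Emit Quantize/Dequantize adaptor ops around the "
                       "quantized model so callers exchange float data"),
        llvm::cl::init(false));  // assumed default

    With the adaptors emitted, callers keep passing floating-point tensors;
    without them, the model's inputs and outputs stay in integer quantized
    types.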
  5. tensorflow/compiler/mlir/lite/transforms/quantize.cc

      static bool IsQuantizableCustomOp(Operation* op,
                                        const quant::CustomOpMap& custom_op_map) {
        // In some cases, ops may need to be quantized even though their op trait is
        // not quantizable. For example, various ops can be categorized as custom
        // ops even though each of them may require different
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 13.3K bytes
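    A hedged sketch of the map that drives this check; the member names below
    are assumptions for illustration, not the real quant::CustomOpMap
    definition:

    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Assumed shape of the per-op quantization spec.
    struct CustomOpInfo {
      std::vector<std::int32_t> quantizable_input_indices;
    };
    using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;

    // An op without a quantizable trait is still treated as quantizable when
    // the user listed it in the map, since each custom op may need different
    // handling.
    bool IsQuantizableCustomOp(const std::string& op_name,
                               const CustomOpMap& custom_op_map) {
      return custom_op_map.count(op_name) > 0;
    }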
  6. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

      // The following two passes find specific uniform quantization patterns in
      // StableHLO and convert them to TFLite ops that accept or produce uniform
      // quantized types. They only target a specific set of models that contain
      // "decomposed" quantized ops produced from the framework level. This is why
      // they are placed right after the `LegalizeTFXlaCallModuleToStablehloPass`
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
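    The ordering constraint the comment describes is expressed with the usual
    MLIR PassManager idiom; the pass-constructor names below are hypothetical
    stand-ins, not the real factory functions:

    #include <memory>

    #include "mlir/Pass/Pass.h"
    #include "mlir/Pass/PassManager.h"

    // Hypothetical constructors standing in for the real pass factories.
    std::unique_ptr<mlir::Pass> CreateLegalizeTFXlaCallModuleToStablehloPass();
    std::unique_ptr<mlir::Pass> CreateComposeUniformQuantizedTypePass();
    std::unique_ptr<mlir::Pass> CreateUniformQuantizedStablehloToTflPass();

    void AddStablehloQuantPasses(mlir::OpPassManager& pm) {
      // The uniform-quantization pattern passes run immediately after the
      // XlaCallModule-to-StableHLO legalization, so the "decomposed"
      // quantized ops are already in StableHLO form when the patterns look
      // for them.
      pm.addPass(CreateLegalizeTFXlaCallModuleToStablehloPass());
      pm.addPass(CreateComposeUniformQuantizedTypePass());
      pm.addPass(CreateUniformQuantizedStablehloToTflPass());
    }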
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

          != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
      ):
        raise ValueError(
            'StableHLO quantized opset currently only supports static range'
            ' quantization and weight-only quantization via TF Quantizer.'
        )
    
      # Set `force_graph_mode_calibration` to True so that ops that are not
      # connected to return ops are still executed during calibration.
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc

        return success();
      }
    };
    
    // UniformDequantizeOp takes TF quantized types as input, which will have
    // been converted to the mhlo quantized types. Use OpConversionPattern in
    // order to retrieve the operand type *after* conversion, via the OpAdaptor
    // operand accessor.
    // The same applies to other Uniform Quant ops that take TF quantized types
    // as input.
    class ConvertUniformDequantizeOp
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 30.9K bytes
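    Schematically, the idiom the comment describes looks like the skeleton
    below; the pattern body is elided and the operand accessor name is an
    assumption:

    #include "mlir/Transforms/DialectConversion.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    class ConvertUniformDequantizeOp
        : public mlir::OpConversionPattern<mlir::TF::UniformDequantizeOp> {
     public:
      using OpConversionPattern::OpConversionPattern;

      mlir::LogicalResult matchAndRewrite(
          mlir::TF::UniformDequantizeOp op, OpAdaptor adaptor,
          mlir::ConversionPatternRewriter& rewriter) const override {
        // adaptor.getInput() yields the operand with its type *after*
        // conversion (an mhlo quantized type); op.getInput() would still
        // show the original TF quantized type.
        mlir::Value input = adaptor.getInput();
        // ... build the mhlo replacement from `input`, then replace `op` ...
        (void)input;
        (void)rewriter;
        return mlir::success();
      }
    };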
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/nchw_convolution_to_nhwc.mlir

    }
    
    // CHECK-NOT: stablehlo.transpose
    // CHECK: %[[CONV:.+]] = stablehlo.convolution
    // CHECK-SAME{LITERAL}: [b, f, 0, 1]x[o, i, 0, 1]->[b, 0, 1, f]
    // CHECK-NOT: stablehlo.transpose
    
    // -----
    
    // Tests that a quantized convolution does not match. No conversion occurs.
    
    // CHECK-LABEL: quantized_convolution
    - Last Modified: Mon Mar 25 23:00:47 UTC 2024
    - 5.5K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

            per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
      }
    
      auto quantize = builder.create<quantfork::QuantizeCastOp>(
          q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
      auto dequantize = builder.create<quantfork::DequantizeCastOp>(
          dq_op.getLoc(), new_value_type, quantize.getResult());
      return dequantize.getResult();
    }
    
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes