Results 41 - 50 of 108 for Quantized (0.14 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc

    constexpr StringRef kCompositeFuncPrefix = "composite_";
    constexpr StringRef kEmptyNodeName = "_empty_node";
    
    // Returns a pair: `func_name` and `node_name` for the lifted function. In the
    // TF quantizer, both are filled. For the StableHLO quantizer, only the
    // func_name is filled and node_name is always set to "_empty_node".
    std::pair<std::string, std::string> GetFuncNameAndNodeName(
        TF::PartitionedCallOp call_op, const FlatSymbolRefAttr &f_attr) {

    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 13K bytes
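
    A minimal usage sketch for the helper above; `call_op`, `f_attr`, and the
    surrounding pass are assumed to be in scope (hypothetical caller), and the
    StableHLO case is detected via the `kEmptyNodeName` sentinel.

      // Hypothetical caller: branch on whether the node name was filled
      // (TF quantizer) or left as the "_empty_node" sentinel (StableHLO).
      const auto [func_name, node_name] = GetFuncNameAndNodeName(call_op, f_attr);
      if (node_name == kEmptyNodeName) {
        // StableHLO quantizer path: only the lifted function name is meaningful.
      } else {
        // TF quantizer path: both the function name and the node name are set.
      }
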
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

        func.return %dot_out : tensor<*x!tf_type.qint32>
      }
    
      // Quantize initial input at the start of the graph. Output is qint8.
      func.func @quantize_i8(%input : tensor<*xf32>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>) -> tensor<*x!tf_type.qint8> {
        %quantize = "tf.UniformQuantize"(%input, %input_scale, %input_zp) {
          Tin = "tfdtype$DT_FLOAT",
          Tout = "tfdtype$DT_QINT8",

    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
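
    The `tf.UniformQuantize` op above performs standard affine quantization. A
    self-contained C++ sketch of the per-tensor math for the qint8 case (an
    illustration of the arithmetic, not the TensorFlow kernel itself):

      #include <algorithm>
      #include <cmath>
      #include <cstdint>

      // Affine per-tensor quantization to int8: q = clamp(round(x / scale) + zp).
      int8_t QuantizeToInt8(float x, float scale, int32_t zero_point) {
        const int32_t q =
            static_cast<int32_t>(std::lround(x / scale)) + zero_point;
        return static_cast<int8_t>(std::clamp(q, -128, 127));
      }

      // The inverse used when dequantizing: x ~= (q - zp) * scale.
      float DequantizeFromInt8(int8_t q, float scale, int32_t zero_point) {
        return static_cast<float>(static_cast<int32_t>(q) - zero_point) * scale;
      }
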
  3. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc

      auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
      ASSERT_THAT(func_op, NotNull());
    
      auto uniform_quantize_op_itr =
          func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
      ASSERT_THAT(
          uniform_quantize_op_itr,
          Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
    
      // `uniform_quantize` is considered partially quantized because its output is

    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.8K bytes
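
    The `op_begin<...>()` / `op_end<...>()` calls above are mlir::Region's
    filtered op iterators. A hedged sketch of the same check using the range
    form, assuming `func_op` and the StableHLO headers are available:

      // Count stablehlo.uniform_quantize ops in the function body; the test
      // above only asserts that at least one exists.
      int num_uniform_quantize_ops = 0;
      for (mlir::stablehlo::UniformQuantizeOp op :
           func_op.getBody().getOps<mlir::stablehlo::UniformQuantizeOp>()) {
        (void)op;
        ++num_uniform_quantize_ops;
      }
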
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' -quant-quantize-composite-functions='quantization-method=weight_only target-opset=XLA' -symbol-dce | FileCheck --check-prefix=PerTensor %s

    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
  5. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

            return success();
          }
    
          op.replaceAllUsesWith(q.getInput());
          return success();
        }
        return failure();
      }
    };
    
    // Fold the constant quantized Transpose ops.
    struct FoldTransposeOp : public OpRewritePattern<TransposeOp> {
      explicit FoldTransposeOp(MLIRContext* context)
          : OpRewritePattern<TransposeOp>(context, 1) {}
    

    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
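
    For context, `FoldTransposeOp` above follows the standard
    mlir::OpRewritePattern shape. A bare skeleton of such a pattern (the
    matchAndRewrite body is only a placeholder, not the actual folding logic):

      struct FoldTransposeOpSketch : public OpRewritePattern<TransposeOp> {
        explicit FoldTransposeOpSketch(MLIRContext* context)
            : OpRewritePattern<TransposeOp>(context, /*benefit=*/1) {}

        LogicalResult matchAndRewrite(TransposeOp op,
                                      PatternRewriter& rewriter) const override {
          // Placeholder: a real pattern would check that the input and the
          // permutation are constants, fold them, and replace `op` with the
          // folded constant.
          return failure();
        }
      };
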
  6. tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc

                          GetInferenceString(device_inference_type.inference_type));
    }
    
    // For every device, we will do the following:
    // If the inference type is quantized, we will try the float alternative.
    // If it's float, we will just keep it as it is.
    std::vector<InferenceDeviceType> GetAllAlternativeInferenceDeviceType(
        InferenceType inference_type, ArrayRef<std::string> devices) {

    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 12.3K bytes
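
    A simplified, hypothetical sketch of the selection rule described in the
    comment above (the real InferenceDeviceType/InferenceType live in the TAC
    headers and carry more detail):

      #include <string>
      #include <vector>

      enum class InferenceType { kFloat, kQuantized };

      struct DeviceInferenceType {
        std::string device;
        InferenceType inference_type;
      };

      // For every device, keep the requested inference type; if it is
      // quantized, also try the float alternative on that device.
      std::vector<DeviceInferenceType> GetAllAlternatives(
          InferenceType inference_type, const std::vector<std::string>& devices) {
        std::vector<DeviceInferenceType> result;
        for (const std::string& device : devices) {
          result.push_back({device, inference_type});
          if (inference_type == InferenceType::kQuantized) {
            result.push_back({device, InferenceType::kFloat});
          }
        }
        return result;
      }
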
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir

        return %1 : tensor<1x3xf32>
      }
    }
    
    // -----
    
    // Do not merge when the function is not a quantized function.
    
    module attributes {tf_saved_model.semantics} {
      // CHECK-LABEL: func.func private @merge_relu_fusion
      func.func private @merge_relu_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {

    - Last Modified: Thu Apr 04 23:45:53 UTC 2024
    - 14K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops"))};
    
      // Initialize for tests.
      void initializeForTest() {
        if (!test_mode_) return;
    
        op_set_.setCallback([this](const OpSet& new_op_set) {
          quant_options_.set_op_set(new_op_set);

    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
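
    The snippet above pairs an llvm::cl enum option with setCallback so that
    parsing the flag also updates the pass's quantization options. A standalone
    sketch of that pattern with a hypothetical Config struct and flag name:

      #include "llvm/Support/CommandLine.h"

      enum class OpSet { TF, XLA, UNIFORM_QUANTIZED };

      struct Config {
        OpSet op_set = OpSet::TF;
      };

      static Config config;

      static llvm::cl::opt<OpSet> op_set_flag(
          "op-set", llvm::cl::desc("Target op set"), llvm::cl::init(OpSet::TF),
          llvm::cl::values(
              clEnumValN(OpSet::TF, "TF", "Uses TF ops"),
              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops")));

      int main(int argc, char** argv) {
        // Mirror the parsed value into the config object, as the pass above
        // does with quant_options_.
        op_set_flag.setCallback(
            [](const OpSet& new_op_set) { config.op_set = new_op_set; });
        llvm::cl::ParseCommandLineOptions(argc, argv);
        return 0;
      }
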
  9. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

        }
    
        // TODO: b/264218457 - Refactor the component below once StableHLO Quantizer
        // can run DRQ. Temporarily using TF Quantization for StableHLO DRQ.
        if (!toco_flags.has_quantization_options()) {
          // The default minimum number of elements a weights array must have to be
          // quantized by this transformation.
          const int kWeightsMinNumElementsDefault = 1024;
    

    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
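
    A hedged illustration of how such a minimum-size threshold typically gates
    dynamic-range weight quantization (hypothetical helper, not the converter's
    actual API):

      #include <cstdint>

      // Weights arrays smaller than this are left in float: quantizing them
      // saves little space relative to the bookkeeping it adds.
      constexpr int64_t kWeightsMinNumElementsDefault = 1024;

      bool ShouldQuantizeWeights(
          int64_t num_elements,
          int64_t min_num_elements = kWeightsMinNumElementsDefault) {
        return num_elements >= min_num_elements;
      }
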
  10. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h

        assert(scales_.size() == zero_points_.size());
      }
    
      // Quantize an Attribute by the quantization parameters. Return nullptr if
      // the conversion fails or the input array isn't an ElementsAttr.
      ElementsAttr convert(Attribute real_value);
    
     private:
      // Quantize a DenseFPElementsAttr by the quantization parameters.
      DenseElementsAttr convert(DenseFPElementsAttr attr);
    

    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 9.8K bytes
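
    A hedged usage sketch of the converter interface above, where `converter`
    is a hypothetical instance of the class declared in UniformSupport.h and
    `real_value` is the floating-point constant attribute to quantize:

      // convert() returns a null ElementsAttr when the input is not an
      // ElementsAttr or the conversion fails.
      ElementsAttr quantized = converter.convert(real_value);
      if (!quantized) {
        // Keep the original floating-point constant.
      } else {
        // Use `quantized` as the value of the new quantized constant op.
      }
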