Results 71 - 80 of 306 for Quantized (0.27 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

          attrs.push_back(rewriter.getNamedAttr(
              attr_minmax, rewriter.getI64IntegerAttr(quant_val)));
        }
      }
      return success();
    }
    
    // This LogicalResult covers both the hybrid and fully quantized op cases.
    LogicalResult FillAttributesForUniformQuantizedDotOp(
        PatternRewriter& rewriter, Operation* op,
        llvm::StringMap<Attribute>& identifier_to_attr,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes
    - Viewed (0)
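The snippet above assembles op attributes with rewriter.getNamedAttr and rewriter.getI64IntegerAttr. Below is a minimal standalone sketch of that pattern using a plain mlir::Builder; the helper and the attribute names are illustrative assumptions, not the exact attributes FillAttributesForUniformQuantizedDotOp emits.

    #include <cstdint>

    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/Operation.h"

    // Attaches illustrative quantization bound attributes to `op`, in the same
    // style as the excerpt above.
    void AttachQuantMinMaxAttrs(mlir::Operation* op, mlir::Builder& builder,
                                int64_t quant_min, int64_t quant_max) {
      llvm::SmallVector<mlir::NamedAttribute> attrs;
      attrs.push_back(builder.getNamedAttr("quantization_min_val",
                                           builder.getI64IntegerAttr(quant_min)));
      attrs.push_back(builder.getNamedAttr("quantization_max_val",
                                           builder.getI64IntegerAttr(quant_max)));
      // Set each attribute individually so existing attributes on the op are kept.
      for (mlir::NamedAttribute attr : attrs)
        op->setAttr(attr.getName(), attr.getValue());
    }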
  2. tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h

    // exported as a TF SavedModel.
    void AddCallModuleSerializationPasses(OpPassManager& pm);
    
    // Passes for unpacking quantized ops to int valued StableHLO ops. This is
    // useful when uniform quantized types are suboptimal for the hardware. It goes
    // through a StableHLO <-> MHLO roundtrip to utilize the MHLOQuantToInt pass.
    void AddStablehloQuantToIntPasses(OpPassManager& pm);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 12:53:33 UTC 2024
    - 3.6K bytes
    - Viewed (0)
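A possible usage sketch for the declaration above (not code from the repository): building a PassManager and running the quant-to-int pipeline on a module. The mlir::quant::stablehlo namespace qualification is an assumption; adjust it to whatever pass_pipeline.h actually declares.

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h"

    // Unpacks uniform quantized ops into int-valued StableHLO ops, per the
    // header comment, then runs the assembled pipeline on the module.
    mlir::LogicalResult RunQuantToIntPipeline(mlir::ModuleOp module) {
      mlir::PassManager pm(module.getContext());
      mlir::quant::stablehlo::AddStablehloQuantToIntPasses(pm);  // assumed namespace
      return pm.run(module);
    }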
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir

    // limitations under the License.
    
    // Quantization as a function library with Uniform Quantized Ops for Dynamic
    // PTQ
    //
    // Internal functions should be marked as private. They will be inlined and
    // deleted in `InsertQuantizedFunctionsPass`.
    //
    // For Uniform Quantized op case, attributes are generated during quantize
    // composite pass. Therefore, attr_map is set to an empty string.
    
    module {
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Dec 01 12:06:54 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/common/tfl_pass_config.h

      bool reduce_type_precision = false;
      // Whether to consider this model a quantized model with quantize/dequantize
      // ops and to convert kernels to quantized kernels wherever appropriate.
      quant::QDQConversionMode qdq_conversion_mode =
          quant::QDQConversionMode::kQDQNone;
    
      // When set to true, StableHLO Quantizer is run. The full configuration for
      // the quantizer is at `TocoFlags::quantization_config`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:05:30 UTC 2024
    - 6.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

        if (!mlir::isa<mlir::IntegerType>(raw_elem_type)) {
          return absl::InvalidArgumentError(
              "Quantized tensors must be stored as integers");
        }
        storage_type = mlir::cast<mlir::IntegerType>(raw_elem_type);
      }
    
      // TFlite uses narrow-range [u]int8 for constant buffers of quantized weights.
      // Since we don't know which ones are weights, we represent this optimization
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
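The validation step in this excerpt is easy to isolate. A self-contained restatement of just that check follows; the helper name is invented for illustration, and the surrounding constant-buffer handling from const_tensor_utils.cc is omitted.

    #include "absl/status/status.h"
    #include "absl/status/statusor.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Types.h"

    // Returns the integer storage type of a quantized tensor's elements, or an
    // InvalidArgumentError if the element type is not an integer type.
    absl::StatusOr<mlir::IntegerType> GetQuantizedStorageType(
        mlir::Type raw_elem_type) {
      if (!mlir::isa<mlir::IntegerType>(raw_elem_type)) {
        return absl::InvalidArgumentError(
            "Quantized tensors must be stored as integers");
      }
      return mlir::cast<mlir::IntegerType>(raw_elem_type);
    }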
  6. tensorflow/compiler/mlir/lite/quantization/ir/Passes.h

    std::unique_ptr<OperationPass<func::FuncOp>> createConvertSimulatedQuantPass();
    
    /// Creates a pass that converts constants followed by a qbarrier to a
    /// constant whose value is quantized. This is typically one of the last
    /// passes done when lowering to express actual quantized arithmetic in a
    /// low level representation. Because it modifies the constant, it is
    /// destructive and cannot be undone.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jul 29 18:55:28 UTC 2022
    - 2.3K bytes
    - Viewed (0)
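A usage sketch for the passes declared here, under assumptions: both factory functions are taken to live in mlir::quantfork (the namespace appears in the Passes.td excerpt in result 10), and createConvertConstPass() is assumed to have the no-argument form shown there.

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/lite/quantization/ir/Passes.h"

    void AddQuantLoweringPasses(mlir::OpPassManager& pm) {
      // Convert training-time simulated quantization ops into quantize/dequantize
      // casts.
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::quantfork::createConvertSimulatedQuantPass());
      // Then fold constants followed by a qbarrier into quantized constants;
      // per the header comment this is destructive and cannot be undone.
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::quantfork::createConvertConstPass());
    }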
  7. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.cc

            "float and quantized types"),
        llvm::cl::init(""));
    
    // NOLINTNEXTLINE
    opt<std::string> min_values(
        "tf-input-min-values",
        llvm::cl::desc(
            "Sets the lower bound of the input data. Separated by ','; Each entry "
            "in the list should match an entry in -tf-input-arrays. This is "
            "used when -tf-inference-type is a quantized type."),
        llvm::cl::Optional, llvm::cl::init(""));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 10 20:59:50 UTC 2023
    - 5.5K bytes
    - Viewed (0)
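The flag description says -tf-input-min-values holds one comma-separated entry per entry in -tf-input-arrays. As a small illustration only (this parser is an assumption, not code from tf_mlir_translate_cl.cc), the flag value could be split into per-input lower bounds like this:

    #include <vector>

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"

    // Splits a value such as "0.0,-1.5,0.25" into per-input lower bounds.
    // Entries that fail to parse are skipped; real flag handling would more
    // likely report an error.
    std::vector<double> ParseMinValues(llvm::StringRef min_values_flag) {
      llvm::SmallVector<llvm::StringRef, 4> pieces;
      min_values_flag.split(pieces, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
      std::vector<double> mins;
      for (llvm::StringRef piece : pieces) {
        double value = 0.0;
        // StringRef::getAsDouble returns true on parse failure.
        if (!piece.trim().getAsDouble(value)) mins.push_back(value);
      }
      return mins;
    }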
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h

    // Populates TensorFlow lowering patterns to lower some of the TensorFlow
    // operations that can be represented using other TensorFlow operations.
    // Patterns are from ops with some inputs or outputs that are quantized types
    // only to ops that allow non-quantized types on all inputs and outputs.
    void PopulateLoweringQuantizedPatterns(MLIRContext *context,
                                           RewritePatternSet *patterns);
    
    }  // namespace TF
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jan 27 15:05:02 UTC 2022
    - 2.4K bytes
    - Viewed (0)
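A hedged usage sketch for the declaration above: the header only populates a RewritePatternSet, so applying the patterns with MLIR's standard greedy rewrite driver is an assumption about how a caller might use it, not something lower_tf.h prescribes.

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
    #include "tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h"

    // Lowers ops with quantized-typed operands/results into ops that accept
    // non-quantized types, per the header comment.
    mlir::LogicalResult LowerQuantizedTfOps(mlir::ModuleOp module) {
      mlir::MLIRContext* ctx = module.getContext();
      mlir::RewritePatternSet patterns(ctx);
      mlir::TF::PopulateLoweringQuantizedPatterns(ctx, &patterns);
      return mlir::applyPatternsAndFoldGreedily(module, std::move(patterns));
    }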
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc

        SymbolTable symbol_table(call_op->getParentOfType<ModuleOp>());
        auto func_op =
            dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(func_name));
        if (!func_op) return failure();
        // The quantized fusion should have requantize and return ops at the end.
        auto return_op = dyn_cast_or_null<func::ReturnOp>(
            func_op.getRegion().getBlocks().front().getTerminator());
        if (!return_op) return failure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.9K bytes
    - Viewed (0)
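Distilled from the excerpt, a standalone sketch of its lookup-and-check pattern; the helper name is invented for illustration, and the requantize/return checks that follow in the actual pass are omitted.

    #include "llvm/Support/Casting.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/IR/SymbolTable.h"

    // Resolves `func_name` through the module's symbol table and returns the
    // terminator of the callee's entry block as a func::ReturnOp, or null if
    // any step fails.
    mlir::func::ReturnOp GetCalleeReturnOp(mlir::ModuleOp module,
                                           llvm::StringRef func_name) {
      mlir::SymbolTable symbol_table(module);
      auto func_op = llvm::dyn_cast_or_null<mlir::func::FuncOp>(
          symbol_table.lookup(func_name));
      if (!func_op || func_op.getRegion().empty()) return nullptr;
      return llvm::dyn_cast_or_null<mlir::func::ReturnOp>(
          func_op.getRegion().getBlocks().front().getTerminator());
    }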
  10. tensorflow/compiler/mlir/lite/quantization/ir/Passes.td

      let summary = "Converts constants followed by qbarrier to actual quantized "
                    "values";
      let constructor = "mlir::quantfork::createConvertConstPass()";
    }
    
    def QuantConvertSimulatedQuant
        : Pass<"quant-convert-simulated-quantization", "func::FuncOp"> {
      let summary = "Converts training-time simulated quantization ops to "
                    "corresponding quantize/dequantize casts";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jul 29 18:55:28 UTC 2022
    - 1.3K bytes
    - Viewed (0)