Results 21 - 30 of 323 for quantized (0.22 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

    };
    
    // Matches the pattern for quantized convolution op and rewrites it to use
    // uniform quantized types.
    //
    // Currently assumes asymmetric per-tensor quantization for activations and
    // symmetric per-channel quantization for filters.
    //
    // This pattern represents the following derived equation, where:
    // * rn = real (expressed) value for tensor n
    // * qn = quantized value for tensor n
    // * sn = scale for tensor n
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
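
    The derived equation itself is truncated in this snippet. For reference, the
    uniform (affine) quantization relation such comments build on is
    rn = sn * (qn - zn), where zn is the zero point for tensor n; symmetric
    quantization is the special case zn = 0. A minimal C++ sketch of that
    relation (illustrative helpers, not code from the file above):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Recovers the real (expressed) value r from a quantized value q.
    float Dequantize(int8_t q, float scale, int32_t zero_point) {
      return scale * static_cast<float>(q - zero_point);
    }

    // Maps a real value back into the quantized domain, clamping to INT8 range.
    int8_t Quantize(float r, float scale, int32_t zero_point) {
      const int32_t q =
          static_cast<int32_t>(std::lround(r / scale)) + zero_point;
      return static_cast<int8_t>(std::clamp<int32_t>(q, -128, 127));
    }
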
  2. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

        METHOD_NO_QUANTIZE = 1;
    
    // Static range quantization. Quantized tensor values' ranges are statically
    // determined. Activations and weights are quantized to INT8, while biases
    // are quantized to INT32.
        METHOD_STATIC_RANGE_INT8 = 2;
    
    // Dynamic range quantization. Quantized tensor values' ranges are
    // determined dynamically during graph execution. The weights are quantized
    // during conversion.
        METHOD_DYNAMIC_RANGE_INT8 = 3;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
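
    For METHOD_STATIC_RANGE_INT8, the statically determined range has to be
    turned into quantization parameters ahead of time. A minimal sketch
    (hypothetical helper, not part of the proto above) of mapping a calibrated
    [min, max] range onto the INT8 interval [-128, 127]:

    #include <cmath>
    #include <cstdint>
    #include <utility>

    // Returns {scale, zero_point} for asymmetric INT8 quantization of a tensor
    // whose observed values lie in [min, max]. Assumes max > min.
    std::pair<float, int32_t> ChooseQuantParams(float min, float max) {
      const float scale = (max - min) / 255.0f;  // 255 = 127 - (-128)
      const int32_t zero_point = static_cast<int32_t>(
          std::lround(-128.0f - min / scale));  // maps `min` onto -128
      return {scale, zero_point};
    }
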
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc

    };
    
    void QuantizeCompositeFunctionsPass::runOnOperation() {
      MLIRContext& ctx = getContext();
    
      PassManager pm(&ctx);
      // Intermediate output from QuantizePass will have quantized ops
      // (XlaCallModuleOps) with quantized input and output types, which are not
      // allowed in the TF dialect.
      pm.enableVerifier(false);
    
      PrepareQuantizePassOptions options;
      options.enable_per_channel_quantized_weight_ =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 4.6K bytes
    - Viewed (0)
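
    The pattern above, a nested PassManager with its verifier disabled so that
    intermediate IR may be temporarily invalid, can be sketched on its own. A
    minimal example assuming standard MLIR APIs (the real pass adds its own
    pipeline where the comment placeholder is):

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/PassManager.h"

    // Runs `module` through a nested pipeline whose intermediate output may not
    // verify; verification happens again in the outer pipeline afterwards.
    mlir::LogicalResult RunUnverifiedPipeline(mlir::ModuleOp module) {
      mlir::PassManager pm(module.getContext());
      pm.enableVerifier(false);  // intermediate ops may carry illegal types
      // pm.addPass(...);        // quantization passes would be added here
      return pm.run(module);
    }
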
  4. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

    // and unquantized layers. The unquantized layer's inputs come from the
    // previous quantized layer (note that this differs from
    // DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model
    // has a DumpTensor, which is used to save the entire value of the outputs
    // from both the quantized and unquantized layers.
        DEBUGGER_TYPE_INT_PER_LAYER = 2;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td

    //      quantized representation may be acceptable.
    //
    // Especially early in transformation, it is common to have pairs of
    // qcast/dcast at points where a transition to a quantized type is
    // required. In addition, it is also common to have an identity qcast
    // (where the operand and result type are not quantized) at all points where
    // it is legal to use a quantized representation (but is not known to be
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 09 03:10:59 UTC 2024
    - 10.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

    // (e.g. matmul) has both quantized and unquantized inputs by dequantizing
    // the quantized inputs, performing the operation in the expressed type, then
    // requantizing if a quantized output is required.
    //
    // The motivation behind these changes is for Dialects that assume only float
    // or quantized computation, and do not support a mixture of these types on
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
    - Viewed (0)
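
    A minimal sketch (illustrative scalar helpers using symmetric quantization,
    not the pass's real rewrite logic) of the decomposition described above, for
    a hybrid multiply whose first input is quantized and whose output must be
    quantized again:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    float Dequantize(int8_t q, float scale) {
      return scale * static_cast<float>(q);
    }

    int8_t Quantize(float r, float scale) {
      const long q = std::lround(r / scale);
      return static_cast<int8_t>(std::clamp<long>(q, -128, 127));
    }

    // Dequantize the quantized input, compute in the expressed (float) type,
    // then requantize because a quantized output is required.
    int8_t HybridMul(int8_t a, float a_scale, float b, float out_scale) {
      return Quantize(Dequantize(a, a_scale) * b, out_scale);
    }
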
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir

    // CHECK: -------- Quantization Summary --------
    // CHECK: Number of quantized layers in the model
    // CHECK: --------------------------------
    // CHECK: Name    Count/Total
    // CHECK: ================================
    // CHECK: Conv2D  1/2
    
    // CHECK: Number of quantized layers with quantized outputs: 1/1
    // CHECK: Number of quantize layers added: 1
    // CHECK: Number of dequantize layers added: 1
    }
    
    // -----
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 15.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

      bool SetConstantResultParams(Operation* op);
    
      // Inserts the Quantize and Dequantize ops after `op`'s `index`-th result. The
      // quantized element type for the result is `quantized_type`.
      void QuantizeOpResult(Operation* op, int result_index,
                            QuantizedType quantized_type);
    
      // Inserts the Quantize and Dequantize ops after `arg`. The quantized element
      // type for `arg` is `quantized_type`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 20 11:42:17 UTC 2024
    - 16.8K bytes
    - Viewed (0)
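
    The comment above describes inserting a quantize/dequantize (QDQ) pair
    after a result. A minimal sketch of that rewrite, using hypothetical op
    names created through MLIR's generic OperationState API (and assuming a
    context that allows unregistered dialects; this is not the driver's real
    implementation):

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/Operation.h"

    void InsertQdqAfterResult(mlir::OpBuilder& builder, mlir::Operation* op,
                              int index, mlir::Type quantized_type) {
      mlir::Value result = op->getResult(index);
      builder.setInsertionPointAfter(op);

      // q = "demo.quantize"(result), producing `quantized_type` (hypothetical).
      mlir::OperationState q_state(op->getLoc(), "demo.quantize");
      q_state.addOperands(result);
      q_state.addTypes(quantized_type);
      mlir::Operation* q = builder.create(q_state);

      // dq = "demo.dequantize"(q), restoring the original expressed type.
      mlir::OperationState dq_state(op->getLoc(), "demo.dequantize");
      dq_state.addOperands(q->getResult(0));
      dq_state.addTypes(result.getType());
      mlir::Operation* dq = builder.create(dq_state);

      // Existing consumers now read the dequantized value; the quantize op
      // itself keeps reading the original result.
      result.replaceAllUsesExcept(dq->getResult(0), q);
    }
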
  9. tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc

        return result;
      } else {
        return std::nullopt;
      }
    }
    
    // Populates quantized ops from `module_op` to `results`. After going through
    // the quantization passes, quantized ops are represented as `func::CallOp` with
    // a callee's prefix of `quantized_`.
    void PopulateQuantizedResults(ModuleOp module_op,
                                  QuantizationResults& results) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.8K bytes
    - Viewed (0)
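
    The `quantized_` callee-prefix convention mentioned above is straightforward
    to check. A minimal sketch, assuming standard MLIR func-dialect APIs and a
    recent LLVM where llvm::StringRef::starts_with is available:

    #include "mlir/Dialect/Func/IR/FuncOps.h"

    // A call is treated as a quantized op when its callee name carries the
    // `quantized_` prefix, per the convention described in the comment above.
    bool IsQuantizedCall(mlir::func::CallOp call) {
      return call.getCallee().starts_with("quantized_");
    }
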
  10. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

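    // Context for the snippet below: for each value returned by the function,
    // collect its users that are quantize ops; if exactly one exists, rewrite
    // the return operand to use that quantized value directly.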
          llvm::SmallVector<Value, 4> quantized;
          for (auto user : returned.getUsers()) {
            if (auto q = Quantized(user)) {
              quantized.push_back(q);
            }
          }
          if (quantized.size() == 1) {
            ret.setOperand(i, quantized.front());
          }
          i++;
        }
      });
    
    // We prefer placing quantization emulation ops on the results of the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)