Results 121 - 130 of 291 for Quantized (0.68 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir

    // RUN: tf-quant-opt %s -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions -quant-insert-quantized-functions -quant-quantize-composite-functions -symbol-dce | FileCheck %s
    
    func.func @fake_quant_conv(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> {
      %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.5K bytes
    - Viewed (0)
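
    As context for the RUN line above: the pipeline rewrites TensorFlow fake-quant
    annotations into explicit quantize/dequantize pairs and then into quantized
    functions. A minimal sketch of the kind of input the first pass,
    -quant-convert-fake-quant-to-qdq, operates on (the shapes, range, and choice of
    tf.FakeQuantWithMinMaxArgs here are illustrative, not taken from this test):

    // Hypothetical fake-quant annotation on a float value; the pass is expected to
    // replace the op with a quantize/dequantize (QDQ) pair carrying the same range.
    func.func @annotated(%arg0: tensor<8xf32>) -> tensor<8xf32> {
      %0 = "tf.FakeQuantWithMinMaxArgs"(%arg0) {min = -1.0 : f32, max = 1.0 : f32, num_bits = 8 : i64, narrow_range = false} : (tensor<8xf32>) -> tensor<8xf32>
      func.return %0 : tensor<8xf32>
    }
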
  2. tensorflow/compiler/mlir/lite/ir/tfl_op_interfaces.td

          [{Returns the supported block size of float sparse operands.}],
          "std::vector<std::vector<int>>", "GetFloatBlockSize", (ins)
        >,
        InterfaceMethod<
          [{Returns the supported block size of quantized sparse operands.}],
          "std::vector<std::vector<int>>", "GetQuantizedBlockSize", (ins)
        >,
      ];
    }
    
    //===----------------------------------------------------------------------===//
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

        }]>
      ];
    }
    
    def TFL_QuantizeOp: TFL_Op<"quantize", [
        FirstAttrDerivedResultType,
        SameOperandsAndResultShape, NoMemoryEffect]> {
      let summary = "Quantize operator";
    
      let description = [{
        Converts floating point tensors to quantized integer tensors according to
        the quantization parameters defined in the type attribute.
      }];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
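
    The description notes that the quantization parameters live in the type
    attribute rather than as separate operands. A minimal, hypothetical example of
    the op in TFLite MLIR (the scale and zero point are made-up values):

    // "tfl.quantize" carries its parameters in the qtype attribute and result type;
    // here scale = 7.812500e-03 and zero point = 128 are illustrative.
    func.func @quantize(%arg0: tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>> {
      %0 = "tfl.quantize"(%arg0) {qtype = tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>>
      func.return %0 : tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>>
    }
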
  4. tensorflow/compiler/mlir/lite/tf_tfl_passes.h

    // is because StableHLO Quantizer accepts StableHLO modules.
    void AddPreQuantizationStableHloToTfPasses(
        mlir::StringRef entry_function_name,
        const mlir::TFL::PassConfig& pass_config,
        mlir::OpPassManager& pass_manager);
    
    // Adds the second portion of StableHlo->TF passes happening after quantization.
    // The input module is expected to be an MHLO module, or a quantized StableHLO
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 01 06:14:07 UTC 2024
    - 4.1K bytes
    - Viewed (0)
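
    For context on the phrase "a quantized StableHLO module": after the StableHLO
    Quantizer runs, tensors carry !quant.uniform element types instead of plain
    floats. A rough, hypothetical fragment (ops and parameters are illustrative,
    not taken from this header):

    // Illustrative quantized StableHLO: quantize a float input, then dequantize it.
    func.func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      %0 = stablehlo.uniform_quantize %arg0 : (tensor<4xf32>) -> tensor<4x!quant.uniform<i8:f32, 1.562500e-02>>
      %1 = stablehlo.uniform_dequantize %0 : (tensor<4x!quant.uniform<i8:f32, 1.562500e-02>>) -> tensor<4xf32>
      func.return %1 : tensor<4xf32>
    }
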
  5. tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.h

    limitations under the License.
    ==============================================================================*/
    //
    // This file defines support utilities for interoperating with FakeQuant* based
    // QAT (Quantized Aware Training) computations, as implemented by TFLite. Note
    // that FakeQuant* operators mix multiple concerns specific to how TFLite
    // originally implemented quantization. As such, utilities here enforce
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 11:52:27 UTC 2024
    - 3.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/python/converter_python_api_wrapper.cc

          py::arg("enable_variable_quantization") = false,
          py::arg("disable_per_channel_for_dense_layers") = false,
          py::arg("debug_options_proto_txt_raw") = nullptr,
          R"pbdoc(
          Returns a quantized model.
        )pbdoc");
      m.def(
          "ExperimentalMlirSparsifyModel",
          [](py::object input_contents_txt_raw) {
            return tensorflow::PyoOrThrow(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 18:18:30 UTC 2024
    - 5.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.json

    // CHECK-SAME: input_to_output_intermediate = tensor<*x!quant.calibrated<f32<-1.000000e+00:1.000000e+00>>>
    
    // Checks if calibrated type is exported back to quantized type.
    // RoundTrip: name: "effective_hidden_scale_intermediate",
    // RoundTrip-NEXT: quantization: {
    // RoundTrip-NEXT: min: [ -0.5 ],
    // RoundTrip-NEXT: max: [ 0.5 ]
    
    {
      "version": 3,
      "operator_codes": [
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 06:25:50 UTC 2024
    - 9.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

    };
    
    // This pass performs a manual conversion with FakeQuant, converting between
    // floating point and quantized space. It is designed to reproduce TF's
    // implementation, mirroring the previous XLA implementation.
    //
    // 1. Computing proper quantized bounds. This involves nudging the input bounds.
    // 2. Converting the input bounds to quantized space, rounding values.
    // 3. Convert back into floating point space.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
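
    Spelled out, the three numbered steps correspond to the conventional fake-quant
    arithmetic (a sketch of the standard formulation for a float range [x_min, x_max]
    and a b-bit quantized range [q_min, q_max] = [0, 2^b - 1]; the actual lower_tf.cc
    code may differ in detail):

    \[ s = \frac{x_{\max} - x_{\min}}{q_{\max} - q_{\min}}, \qquad
       z = \operatorname{clamp}\!\Big(\operatorname{round}\big(q_{\min} - x_{\min}/s\big),\ q_{\min},\ q_{\max}\Big) \]
    \[ x_{\min}^{n} = (q_{\min} - z)\,s, \qquad x_{\max}^{n} = (q_{\max} - z)\,s \]
    \[ \hat{x} = \operatorname{round}\!\Big(\frac{\operatorname{clamp}(x,\ x_{\min}^{n},\ x_{\max}^{n}) - x_{\min}^{n}}{s}\Big)\, s + x_{\min}^{n} \]

    Step 1 is the nudging that produces the bounds x_min^n and x_max^n, step 2 is the
    rounded conversion into quantized space, and step 3 is the multiply-add back into
    floating point.
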
  9. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

            return success();
          }
    
          op.replaceAllUsesWith(q.getInput());
          return success();
        }
        return failure();
      }
    };
    
    // Fold the constant quantized Transpose ops.
    struct FoldTransposeOp : public OpRewritePattern<TransposeOp> {
      explicit FoldTransposeOp(MLIRContext* context)
          : OpRewritePattern<TransposeOp>(context, 1) {}
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.cc

                              WeightOnlyPtqComponent::kName, *function_aliases,
                              *ctx, *module));
    
      // Remove the `tpu` tag for exporting because the output quantized model is
      // essentially a CPU model.
      tags.erase("tpu");
    
      py_function_library.SaveExportedModel(
          dst_saved_model_path, post_calibrated_exported_model,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 5.1K bytes
    - Viewed (0)