Results 1 - 10 of 14 for activation (0.12 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-process-nchw-tensor \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s
    
    // Tests that a `convolution(%activation, %weight)` with the activation
    // tensor in NCHW format is converted to an NHWC convolution. Transpose ops
    // are inserted on the activation and output to match the function
    // signature. The weight constant is transposed.
    
    // CHECK-LABEL: nchw_conv
    // CHECK-SAME: %[[ARG:.+]]: tensor<1x8x4x4xf32>
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
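
    A minimal before/after sketch of the rewrite this test exercises (shapes,
    padding, and value names are illustrative, not copied from the file):

    // Hypothetical input: convolution with the activation in NCHW layout.
    %conv = stablehlo.convolution(%act_nchw, %w_oihw)
        dim_numbers = [b, f, 0, 1]x[o, i, 0, 1]->[b, f, 0, 1],
        window = {pad = [[1, 1], [1, 1]]}
        {batch_group_count = 1 : i64, feature_group_count = 1 : i64}
      : (tensor<1x8x4x4xf32>, tensor<8x8x3x3xf32>) -> tensor<1x8x4x4xf32>

    // Sketch of the output: the activation is transposed to NHWC, the weight
    // constant is folded into HWIO layout, and the result is transposed back
    // so the function signature stays NCHW.
    %t = stablehlo.transpose %act_nchw, dims = [0, 2, 3, 1]
      : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>
    %nhwc = stablehlo.convolution(%t, %w_hwio)
        dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
        window = {pad = [[1, 1], [1, 1]]}
        {batch_group_count = 1 : i64, feature_group_count = 1 : i64}
      : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
    %out = stablehlo.transpose %nhwc, dims = [0, 3, 1, 2]
      : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
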
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-defer-activation-transpose \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s
    
    // Tests that an `add(transpose(arg0), arg1)` pattern is converted to
    // `transpose(add(arg0, transpose(arg1)))`. The transpose on the activation is
    // deferred to the output of `stablehlo.add`, and an extra transpose op is
    // inserted on the RHS to match the shape of the operand.
    
    // CHECK-LABEL: add_with_activation_transpose
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
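
    A hedged sketch of that rewrite (shapes and value names are illustrative):

    // Before: the activation is transposed, then added to the RHS.
    %0 = stablehlo.transpose %arg0, dims = [0, 3, 1, 2]
      : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
    %1 = stablehlo.add %0, %arg1 : tensor<1x8x4x4xf32>

    // After: the add runs in the activation's original layout; the RHS gets
    // an extra transpose so the operand shapes still match, and the deferred
    // transpose moves to the output.
    %r = stablehlo.transpose %arg1, dims = [0, 2, 3, 1]
      : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>
    %a = stablehlo.add %arg0, %r : tensor<1x4x4x8xf32>
    %out = stablehlo.transpose %a, dims = [0, 3, 1, 2]
      : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
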
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions_fusion.td

    //===----------------------------------------------------------------------===//
    // Pattern rules for lifting ops with activation as functions
    //===----------------------------------------------------------------------===//
    
    def LiftConvWithRelu : Pat<
      (StableHLO_MaxOp:$res
        (StableHLO_ConvolutionOp $lhs, $rhs, $window_strides, $padding,
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 23.6K bytes
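
    Roughly, the IR this pattern matches is a convolution feeding a
    max-with-zero, i.e. a ReLU. A sketch (shapes illustrative, lifted
    function name hypothetical):

    %conv = stablehlo.convolution(%lhs, %rhs)
        dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
        window = {pad = [[1, 1], [1, 1]]}
        {batch_group_count = 1 : i64, feature_group_count = 1 : i64}
      : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
    %zero = stablehlo.constant dense<0.0> : tensor<1x4x4x8xf32>
    %relu = stablehlo.maximum %conv, %zero : tensor<1x4x4x8xf32>
    // The conv/ReLU pair is then lifted into a single quantizable function,
    // e.g. a `composite_conv_with_relu_fn`-style callee.
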
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

      }];
      let dependentDialects = ["mlir::stablehlo::StablehloDialect"];
    }
    
    def DeferActivationTransposePass : Pass<"stablehlo-defer-activation-transpose", "mlir::func::FuncOp"> {
      let summary = "Merges stablehlo.transpose for activations.";
      let description = [{
    Defers activation transposes (e.g. the LHS of `stablehlo.add`) to the output
    and optionally inserts `stablehlo.transpose` ops to match the operand shapes.
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir

        return %1 : tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>
      }
    }
    
    // -----
    
    // Merge fusion with dequantize for the no-activation case.
    
    module attributes {tf_saved_model.semantics} {
      // CHECK-LABEL: func.func private @merge_no_act_fusion
      func.func private @merge_no_act_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
    - Last Modified: Thu Apr 04 23:45:53 UTC 2024
    - 14K bytes
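
    For context, `!quant.uniform<i8:f32, 1.000000e-03:-3>` reads as i8 storage
    with real_value = (stored_value - (-3)) * 1.0e-3. A hedged sketch of the
    merge (callee name and types are assumptions for illustration):

    // Before: the quantized fusion's result is immediately dequantized.
    %q = call @quantized_dot_general_fn(%0, %w)
      : (tensor<1x4x!quant.uniform<i8:f32, 6.000000e-03:-128>>,
         tensor<4x3x!quant.uniform<i8:f32, 5.000000e-03>>)
        -> tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>
    %f = stablehlo.uniform_dequantize %q
      : (tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>) -> tensor<1x3xf32>
    // After the merge, the dequantize is absorbed into the callee, which
    // returns tensor<1x3xf32> directly.
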
  6. tensorflow/compiler/mlir/tfr/ir/tfr_ops.td

    def TFR_TFRQuantActRangeOp : TFR_Op<"quant_act_range", [Pure]> {
      let description = [{
     The `quant_act_range` returns a pair of integers indicating the fixed
     range for the fused activation `act` with the quantization defined by the
     `scale` and `zero point`. Currently, the allowed activations are
     `NONE`, `RELU`, `RELU6`, and `RELU_N1_TO_1`.
    
        Example:
    
        ```mlir
    - Last Modified: Mon Apr 22 10:54:29 UTC 2024
    - 17.4K bytes
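
    The snippet truncates just before the example. A hedged sketch of how the
    op reads (operand and result types assumed, not copied from the file):

    // Given the fused activation attribute and the output quantization
    // parameters, the op yields clamp bounds in the quantized domain.
    %min, %max = tfr.quant_act_range(%act, %scale, %zp)
        : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)

    For `RELU6` with scale s and zero point z, the bounds correspond to the
    real interval [0, 6], i.e. roughly round(0/s) + z and round(6/s) + z.
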
  7. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    // Ternary ops patterns.
    //===----------------------------------------------------------------------===//
    // Multi-pattern consisting of matching a stand-alone convolution op
    // followed by an activation op.
    multiclass FuseActFnIntoConvOpPat<Op ActFnOp, ConstantStrAttr ActFnAttr> {
      def FuseActivationFuncWithConv#ActFnOp#ActFnAttr : Pat<
        (ActFnOp (TFL_Conv2DOp:$conv_out $input, $filter, $bias, $h_factor,
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
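
    A hedged sketch of the fusion this multiclass generates, instantiated for
    ReLU (attributes abbreviated, shapes illustrative):

    // Before: stand-alone convolution followed by a stand-alone activation.
    %conv = "tfl.conv_2d"(%input, %filter, %bias)
        {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
         fused_activation_function = "NONE", padding = "SAME",
         stride_h = 1 : i32, stride_w = 1 : i32}
      : (tensor<1x4x4x8xf32>, tensor<8x3x3x8xf32>, tensor<8xf32>)
        -> tensor<1x4x4x8xf32>
    %relu = "tfl.relu"(%conv) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>

    // After: the activation is absorbed into the conv's
    // `fused_activation_function` attribute ("RELU" here) and the
    // stand-alone tfl.relu disappears.
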
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir

    // dynamic batch dimension is properly quantized.
    
    // Note that this checks for the same conditions as
    // quantize_conv_with_bias_dynamic_fn, omitting stablehlo.maximum.
    // This is because activation clipping that includes 0.0f can simply be
    // omitted from the graph, as the lifted function's out_scale and out_zp
    // are already calculated based on the clipped distribution.
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 91.6K bytes
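
    An illustrative numeric check of that claim (numbers assumed, not taken
    from the test): if calibration observes outputs already clipped to
    [0, 6], asymmetric i8 quantization gives out_scale = 6/255 and
    out_zp = -128, so every representable quantized value decodes into
    [0, 6]; an explicit `stablehlo.maximum` with 0.0f can therefore never
    change a result.
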
  9. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

      QuantizationSpecs specs = 4;
    
      // Configures the quantization debugger.
      DebuggerConfig debugger_config = 5;
    
      // Defines calibration options for quantization. This option is only used for
      // activations in static range quantization (SRQ). The quantization calibration
      // method is set to MIN_MAX by default.
      CalibrationOptions calibration_options = 6;
    
      // Path to file to save the quantization report, which is essentially a
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
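
    For reference, the MIN_MAX method mentioned here derives each activation's
    quantization parameters from the running minimum and maximum observed over
    the calibration dataset: scale = (max - min) / (qmax - qmin), with the
    zero point chosen so that min maps to qmin.
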
  10. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    // CHECK: %[[QCONST_0:.+]] =  "tfl.pseudo_qconst"()
    // CHECK: "tfl.batch_matmul"(%[[ARG]], %[[QCONST_0]]) <{adj_x = false, adj_y = false}>
    
    // -----
    
    // Tests static range quantized dot_general with activation as RHS
    
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
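
    A hedged sketch of the input such a test feeds in (quantized types and
    dimensions are illustrative): a static-range-quantized
    `stablehlo.dot_general` whose RHS is an activation rather than a constant,
    which is why it lowers to `tfl.batch_matmul` instead of
    `tfl.fully_connected`.

    %0 = stablehlo.dot_general %arg0, %arg1,
        batching_dims = [0] x [0], contracting_dims = [2] x [1]
      : (tensor<1x2x3x!quant.uniform<i8:f32, 2.000000e+00:1>>,
         tensor<1x3x4x!quant.uniform<i8:f32, 3.000000e+00:2>>)
        -> tensor<1x2x4x!quant.uniform<i8:f32, 4.000000e+00:3>>
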