Results 1 - 10 of 24 for Motivation (0.1 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-process-nchw-tensor \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s
    
// Tests that a `convolution(%activation, %weight)` whose activation tensor
// is in NCHW format is converted to an NHWC convolution. Transpose ops are
// inserted on the activation and output to match the function signature.
// The weight constant is transposed.
    
    // CHECK-LABEL: nchw_conv
    // CHECK-SAME: %[[ARG:.+]]: tensor<1x8x4x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
    - Viewed (0)
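    A minimal sketch of the rewrite this test exercises, in StableHLO IR
    (shapes and the 3x3 kernel are illustrative assumptions, not taken from
    the file):

      // Activation arrives as NCHW; transpose to NHWC, convolve in NHWC,
      // then transpose the result back to match the function signature.
      %0 = stablehlo.transpose %arg0, dims = [0, 2, 3, 1]
          : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>
      // The weight constant is transposed once, at compile time (HWIO layout).
      %w = stablehlo.constant dense<1.000000e+00> : tensor<3x3x8x8xf32>
      %1 = stablehlo.convolution(%0, %w)
          dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
          window = {pad = [[1, 1], [1, 1]]}
          {batch_group_count = 1 : i64, feature_group_count = 1 : i64}
          : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
      %2 = stablehlo.transpose %1, dims = [0, 3, 1, 2]
          : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>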
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-defer-activation-transpose \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s
    
// Tests that an `add(transpose(arg0), arg1)` pattern is converted to
// `transpose(add(arg0, transpose(arg1)))`. The transpose on the activation
// is deferred to the output of `stablehlo.add`, and an extra transpose op
// is inserted on the RHS to match the shape of the operand.
    
    // CHECK-LABEL: add_with_activation_transpose
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)
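    A minimal before/after sketch of this rewrite (shapes are illustrative
    assumptions):

      // Before: the transpose sits on the add's LHS (the activation).
      %0 = stablehlo.transpose %arg0, dims = [0, 2, 3, 1]
          : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>
      %1 = stablehlo.add %0, %arg1 : tensor<1x4x4x8xf32>

      // After: the add runs in the activation's original layout; the RHS
      // gets the inverse transpose and a single transpose is deferred to
      // the output.
      %2 = stablehlo.transpose %arg1, dims = [0, 3, 1, 2]
          : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
      %3 = stablehlo.add %arg0, %2 : tensor<1x8x4x4xf32>
      %4 = stablehlo.transpose %3, dims = [0, 2, 3, 1]
          : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>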
  3. tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_patterns.td

      (TF_MaximumOp (TF_ConstOp:$cst $cst_val), $input),
      (TF_MaximumOp $input, $cst)>;
    
    def SwapMinimumOperands : Pat<
      (TF_MinimumOp (TF_ConstOp:$cst $cst_val), $input),
      (TF_MinimumOp $input, $cst)>;
    
// Relu1 activation is represented as a pair of Max and Min ops. The
// following patterns recognize and keep them as TF ops so they can be
// converted to the TFLite Relu1 op.
    def MatchRelu1Pattern1 : Pat<
      (TF_MinimumOp:$min_op
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 3.2K bytes
    - Viewed (0)
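    The Max/Min pair these patterns match, sketched in TF-dialect IR (shape
    and element type are assumptions; TFLite's Relu1 clamps to [-1, 1]):

      %neg_one = "tf.Const"() {value = dense<-1.0> : tensor<f32>} : () -> tensor<f32>
      %pos_one = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
      // max(x, -1) followed by min(., 1) clamps x to [-1, 1], i.e. Relu1.
      %max = "tf.Maximum"(%arg0, %neg_one) : (tensor<1x4xf32>, tensor<f32>) -> tensor<1x4xf32>
      %min = "tf.Minimum"(%max, %pos_one) : (tensor<1x4xf32>, tensor<f32>) -> tensor<1x4xf32>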
  4. tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto

        // conversion, then dequantized during inference.
        // Activation: f32, Weight: qi8, Bias: f32
        WEIGHT_ONLY = 1;
    
    // Apply default dynamic range quantization. Quantized tensor values'
    // ranges are determined at graph runtime.
        // Activation: f32, Weight: qi8, Bias: f32
        POST_TRAINING_QUANTIZATION_DYNAMIC_RANGE = 2;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 22 02:20:05 UTC 2023
    - 3.6K bytes
    - Viewed (0)
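    For both schemes the weight is stored as qi8 and brought back to f32 for
    the f32 compute op. A sketch of the weight-only idea in StableHLO (the
    quantized type parameters and shapes are made up for illustration):

      // Weight lives as a qi8 tensor and is dequantized at inference time;
      // the dot_general itself runs in f32 (Activation: f32, Bias: f32).
      %w = stablehlo.uniform_dequantize %arg1
          : (tensor<4x2x!quant.uniform<i8:f32, 1.570000e-02>>) -> tensor<4x2xf32>
      %0 = stablehlo.dot_general %arg0, %w, contracting_dims = [1] x [0]
          : (tensor<1x4xf32>, tensor<4x2xf32>) -> tensor<1x2xf32>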
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td

       (IsConstTensor $filter),
       (IsInt32ElementType $conv),
       (HasStaticShapeConstraint $filter),
       (HasStaticShapeAtDimsConstraint<"3"> $input)],
      [], (addBenefit 10)>;
    
    // Convert Conv2D with hybrid inputs (f32 activation/int8 weight) to XlaConv
    def ConvertTFConv2DToXLAConvOpWeightOnly : Pat<
      (TF_Conv2DOp:$conv
        $input,
        (TF_MulOp (TF_CastOp (TF_IdentityOp $filter), $truncate1), $scale),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 21.1K bytes
    - Viewed (0)
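    The "hybrid" input IR this pattern matches, sketched in TF-dialect MLIR
    (shapes and attributes are illustrative assumptions): the i8 filter is
    dequantized through the Identity/Cast/Mul chain bound to `$filter`,
    `$truncate1`, and `$scale` above.

      %id = "tf.Identity"(%filter) : (tensor<2x2x3x2xi8>) -> tensor<2x2x3x2xi8>
      %cast = "tf.Cast"(%id) {Truncate = false} : (tensor<2x2x3x2xi8>) -> tensor<2x2x3x2xf32>
      %dq = "tf.Mul"(%cast, %scale) : (tensor<2x2x3x2xf32>, tensor<f32>) -> tensor<2x2x3x2xf32>
      // f32 activation convolved with the dequantized int8 weight.
      %conv = "tf.Conv2D"(%input, %dq) {strides = [1, 1, 1, 1], padding = "SAME"}
          : (tensor<1x4x4x3xf32>, tensor<2x2x3x2xf32>) -> tensor<1x4x4x2xf32>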
  6. tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions_fusion.td

    //===----------------------------------------------------------------------===//
    // Pattern rules for lifting ops with activation as functions
    //===----------------------------------------------------------------------===//
    
    def LiftConvWithRelu : Pat<
      (StableHLO_MaxOp:$res
        (StableHLO_ConvolutionOp $lhs, $rhs, $window_strides, $padding,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 23.6K bytes
    - Viewed (0)
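    The ReLU-fused convolution this rule matches is a `stablehlo.maximum` of
    the convolution result against zero. A sketch (shapes assumed):

      %zero = stablehlo.constant dense<0.000000e+00> : tensor<1x4x4x8xf32>
      %conv = stablehlo.convolution(%lhs, %rhs)
          dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
          window = {pad = [[1, 1], [1, 1]]}
          {batch_group_count = 1 : i64, feature_group_count = 1 : i64}
          : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
      // ReLU written as max(conv, 0); the pattern lifts the pair into a
      // composite conv-with-relu function.
      %res = stablehlo.maximum %conv, %zero : tensor<1x4x4x8xf32>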
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

    there's any, and set it to True. The reason behind this decision is that
    activations of these ops generally show better accuracy with asymmetric
    input quantization, so we want to eventually deprecate symmetric
    activation quantization for those ops.
    - Unlike in the old quantizer, per-channel quantization is supported for
    weight-only TransposeConvOp.
      }];
    
      let methods = [
        InterfaceMethod<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td

       (IsEinsumSupportedByXlaDotV2 $equation)],
      [], (addBenefit 5)>;
    
    //===----------------------------------------------------------------------===//
    // Pattern rules for lifting ops with bias and activation as functions
    //===----------------------------------------------------------------------===//
    
    multiclass LiftCompositeOpsWithActivation<Op ActivationOp, string ActivationName> {
      def LiftConvWith#ActivationOp : Pat<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 15.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

      }];
      let dependentDialects = ["mlir::stablehlo::StablehloDialect"];
    }
    
    def DeferActivationTransposePass : Pass<"stablehlo-defer-activation-transpose", "mlir::func::FuncOp"> {
      let summary = "Merges stablehlo.transpose for activations.";
      let description = [{
        Defers activation transposes (e.g. LHS of `stablehlo.add`) to the output and
        optionally inserts `stablehlo.transpose`s to match the shape of operands.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

        %mul = "tf.Mul"(%cast, %scale) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        func.return %mul : tensor<*xf32>
      }
    
// Requantizes and clips to the range of the quantized type if there is no specific activation.
      func.func private @internal_requantize_no_activation_fn(%accumulation : tensor<*xi32>,
                             %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
    - Viewed (0)
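    A sketch of what "requantize and clip" amounts to for an i8 output (the
    ops and bounds here are illustrative, not the library function's body):

      // After rescaling the i32 accumulator into the output scale, clamp to
      // the storage range before narrowing (i8: [-128, 127]).
      %i8_min = "tf.Const"() {value = dense<-128> : tensor<i32>} : () -> tensor<i32>
      %i8_max = "tf.Const"() {value = dense<127> : tensor<i32>} : () -> tensor<i32>
      %clip = "tf.ClipByValue"(%rescaled, %i8_min, %i8_max)
          : (tensor<*xi32>, tensor<i32>, tensor<i32>) -> tensor<*xi32>
      %out = "tf.Cast"(%clip) {Truncate = false} : (tensor<*xi32>) -> tensor<*xi8>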