Results 1 - 10 of 13 for Motivation (0.12 sec)
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
    // RUN: stablehlo-quant-opt %s -stablehlo-process-nchw-tensor \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s

    // Tests that a `convolution(%activation, %weight)` with the activation tensor
    // in NCHW format is converted to NHWC convolution. Transpose ops are inserted
    // on the activation and output to match the function signature. The weight
    // constant is transposed.

    // CHECK-LABEL: nchw_conv
    // CHECK-SAME: %[[ARG:.+]]: tensor<1x8x4x4xf32>
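For orientation, the layout juggling this test checks can be mimicked numerically. A minimal NumPy sketch (the input shape is taken from the CHECK line above; everything else is illustrative):

```python
import numpy as np

x_nchw = np.arange(1 * 8 * 4 * 4, dtype=np.float32).reshape(1, 8, 4, 4)

# NCHW -> NHWC: the transpose inserted on the activation moves the
# channel axis to the end.
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))  # shape (1, 4, 4, 8)

# NHWC -> NCHW on the output restores the original layout, so the
# function signature stays in NCHW.
assert np.array_equal(np.transpose(x_nhwc, (0, 3, 1, 2)), x_nchw)
```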
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir
    // RUN: stablehlo-quant-opt %s -stablehlo-defer-activation-transpose \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s

    // Tests that an `add(transpose(arg0), arg1)` pattern is converted to
    // `transpose(add(arg0, transpose(arg1)))`. The transpose in the activation is
    // deferred to the output of `stablehlo.add`, and an extra transpose op is
    // inserted on the RHS to match the shape of the operand.

    // CHECK-LABEL: add_with_activation_transpose
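The rewrite relies on transposes commuting with an elementwise add, where the extra transpose inserted on the RHS is the inverse permutation. A quick NumPy check of that identity (permutation and shapes chosen arbitrarily):

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((1, 3, 4, 5))   # arg0, the pre-transpose activation
b = rng.standard_normal((1, 4, 5, 3))   # arg1, already in the output layout

perm = (0, 2, 3, 1)
inv_perm = tuple(np.argsort(perm))       # the extra transpose on the RHS

before = np.transpose(a, perm) + b                         # add(transpose(arg0), arg1)
after = np.transpose(a + np.transpose(b, inv_perm), perm)  # transpose(add(arg0, transpose(arg1)))

assert np.allclose(before, after)
```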
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td
      (IsConstTensor $filter), (IsInt32ElementType $conv),
      (HasStaticShapeConstraint $filter),
      (HasStaticShapeAtDimsConstraint<"3"> $input)],
     [], (addBenefit 10)>;

    // Convert Conv2D with hybrid inputs (f32 activation/int8 weight) to XlaConv
    def ConvertTFConv2DToXLAConvOpWeightOnly : Pat<
      (TF_Conv2DOp:$conv $input,
        (TF_MulOp
          (TF_CastOp (TF_IdentityOp $filter), $truncate1),
          $scale),
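The hybrid (weight-only) pattern above matches a filter that is cast back to float and rescaled at runtime. A NumPy sketch of the same numerics, using a matmul in place of the convolution and a symmetric per-tensor scale (both simplifying assumptions):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 8)).astype(np.float32)      # f32 activation
w_f32 = rng.standard_normal((8, 3)).astype(np.float32)

# Symmetric int8 quantization of the weight only.
scale = np.abs(w_f32).max() / 127.0
w_i8 = np.clip(np.round(w_f32 / scale), -127, 127).astype(np.int8)

# Hybrid execution: cast the stored int8 filter back to f32 and rescale,
# mirroring the (Cast(filter) * scale) operand in the pattern.
y = x @ (w_i8.astype(np.float32) * scale)
print(np.abs(y - x @ w_f32).max())  # small weight-quantization error
```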
tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions_fusion.td
    //===----------------------------------------------------------------------===//
    // Pattern rules for lifting ops with activation as functions
    //===----------------------------------------------------------------------===//

    def LiftConvWithRelu : Pat<
      (StableHLO_MaxOp:$res
        (StableHLO_ConvolutionOp $lhs, $rhs, $window_strides, $padding,
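In spirit, this rule matches a convolution whose result feeds `max(..., 0)` (a ReLU) and replaces the pair with a single named composite. A toy Python sketch of the semantics being lifted, with a matmul standing in for the convolution (an assumption for brevity):

```python
import numpy as np

def lifted_conv_with_relu_fn(lhs, rhs):
    # Body of the lifted composite: conv (matmul stand-in) followed by
    # max(..., 0), exactly the op pair the pattern matched.
    return np.maximum(lhs @ rhs, 0.0)

rng = np.random.default_rng(0)
x, w = rng.standard_normal((2, 4)), rng.standard_normal((4, 3))
# The composite is numerically identical to the original op pair.
assert np.array_equal(lifted_conv_with_relu_fn(x, w), np.maximum(x @ w, 0.0))
```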
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td
      (IsEinsumSupportedByXlaDotV2 $equation)],
     [], (addBenefit 5)>;

    //===----------------------------------------------------------------------===//
    // Pattern rules for lifting ops with bias and activation as functions
    //===----------------------------------------------------------------------===//

    multiclass LiftCompositeOpsWithActivation<Op ActivationOp, string ActivationName> {
      def LiftConvWith#ActivationOp : Pat<
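The multiclass stamps out one lifting rule per activation op. A rough Python analogue of that parameterization, again using a matmul in place of the convolution and adding the bias this section covers (names are illustrative):

```python
import numpy as np

ACTIVATIONS = {
    "Relu": lambda x: np.maximum(x, 0.0),
    "Relu6": lambda x: np.clip(x, 0.0, 6.0),
}

def make_composite(act_name):
    # One composite per activation, mirroring how LiftConvWith#ActivationOp
    # is instantiated once for each ActivationOp.
    act = ACTIVATIONS[act_name]
    def conv_with_bias_and_act(x, w, b):
        return act(x @ w + b)
    return conv_with_bias_and_act

conv_with_bias_and_relu6 = make_composite("Relu6")
```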
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
      }];
      let dependentDialects = ["mlir::stablehlo::StablehloDialect"];
    }

    def DeferActivationTransposePass
        : Pass<"stablehlo-defer-activation-transpose", "mlir::func::FuncOp"> {
      let summary = "Merges stablehlo.transpose for activations.";
      let description = [{
        Defers activation transposes (e.g. LHS of `stablehlo.add`) to the output
        and optionally inserts `stablehlo.transpose`s to match the shape of operands.
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
%mul = "tf.Mul"(%cast, %scale) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %mul : tensor<*xf32> } // Requantizes and clips to the range of quantized type if there is no specific activation. func.func private @internal_requantize_no_activation_fn(%accumulation : tensor<*xi32>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
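A NumPy sketch of what such a requantize-without-activation helper computes. The argument names follow the excerpt's signature; the rescaling formula and int8 bounds are common conventions, assumed here rather than read from the library:

```python
import numpy as np

def requantize_no_activation(accumulation, input_scale, input_zp,
                             output_scale, output_zp):
    # Rescale the int32 accumulator into the output quantized domain ...
    real = (accumulation.astype(np.float64) - input_zp) * input_scale
    q = np.round(real / output_scale) + output_zp
    # ... and clip to the full range of the quantized type, since no fused
    # activation narrows it further.
    return np.clip(q, -128, 127).astype(np.int8)
```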
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir
        return %1 : tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>
      }
    }

    // -----

    // Merge fusion with dequantize for the no-activation case.
    module attributes {tf_saved_model.semantics} {
      // CHECK-LABEL: func.func private @merge_no_act_fusion
      func.func private @merge_no_act_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
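Conceptually, the pass folds the trailing dequantize into the quantized fusion so callers receive f32 directly. A hedged Python sketch with a matmul fusion and per-tensor parameters (all names illustrative):

```python
import numpy as np

def quantized_fusion_no_act(x_q, w_q):
    # Quantized matmul fusion with no activation (stand-in for the real op).
    return x_q.astype(np.int32) @ w_q.astype(np.int32)

def dequantize(y_q, scale, zero_point):
    return (y_q - zero_point).astype(np.float32) * scale

def merged_fusion(x_q, w_q, scale, zero_point):
    # After the merge: dequantize lives inside the fusion body, so the
    # function returns f32 instead of a quantized tensor followed by a
    # separate dequantize op.
    return dequantize(quantized_fusion_no_act(x_q, w_q), scale, zero_point)
```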
tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
    def TFR_TFRQuantActRangeOp : TFR_Op<"quant_act_range", [Pure]> {
      let description = [{
        The `quant_act_range` returns a pair of integers to indicate the fixed
        range for the fused activation `act` with the quantization defined by
        the `scale` and `zero point`. Currently, the allowed activations are
        `NONE`, `RELU`, `RELU6` and `RELU_N1_TO_1`.

        Example:

        ```mlir
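A Python sketch of how such a fixed range can be derived from `scale` and `zero point`, assuming the usual affine mapping q = round(r / scale) + zero_point and int8 storage (both assumptions; the op's real computation lives in its TFR implementation):

```python
def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
    # Real-valued clamp bounds implied by each fused activation.
    real_bounds = {
        "NONE": (None, None),
        "RELU": (0.0, None),
        "RELU6": (0.0, 6.0),
        "RELU_N1_TO_1": (-1.0, 1.0),
    }[act]
    lo, hi = real_bounds
    # Map the real bounds into the quantized domain.
    q_lo = qmin if lo is None else max(qmin, round(lo / scale) + zero_point)
    q_hi = qmax if hi is None else min(qmax, round(hi / scale) + zero_point)
    return q_lo, q_hi

print(quant_act_range("RELU6", scale=0.05, zero_point=-10))  # (-10, 110)
```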
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
      QuantizationSpecs specs = 4;

      // Configures the quantization debugger.
      DebuggerConfig debugger_config = 5;

      // Defines calibration options for quantization. This option is only used
      // for activation of static range quantization (SRQ). Quantization
      // calibration method is set to MIN_MAX by default.
      CalibrationOptions calibration_options = 6;

      // Path to file to save the quantization report, which is essentially a
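For context, MIN_MAX calibration derives the activation range from the extrema observed over calibration batches. A minimal sketch assuming asymmetric int8 quantization (the formula and function are illustrative, not the proto's API):

```python
import numpy as np

def min_max_calibrate(batches, qmin=-128, qmax=127):
    # MIN_MAX: track the global min/max of the observed activations.
    lo = min(float(b.min()) for b in batches)
    hi = max(float(b.max()) for b in batches)
    lo, hi = min(lo, 0.0), max(hi, 0.0)  # keep 0.0 exactly representable
    scale = (hi - lo) / (qmax - qmin) or 1.0  # guard the degenerate case
    zero_point = qmin - int(round(lo / scale))
    return scale, zero_point
```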