Results 1 - 10 of 12 for Motivation (0.15 sec)
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-process-nchw-tensor \
// RUN:   -split-input-file -verify-diagnostics | FileCheck %s

// Tests that a `convolution(%activation, %weight)` with the activation tensor
// in NCHW format is converted to NHWC convolution. Transpose ops are inserted
// to the activation and output to match the function signature. The weight
// constant is transposed.
// CHECK-LABEL: nchw_conv
// CHECK-SAME: %[[ARG:.+]]: tensor<1x8x4x4xf32>
Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes
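A minimal NumPy sketch (not part of the pass) of the layout permutations this test exercises; the array values are arbitrary:

```python
import numpy as np

# NCHW -> NHWC for the activation, NHWC -> NCHW for the output; the weight
# constant gets an analogous permutation folded at compile time.
x_nchw = np.random.rand(1, 8, 4, 4)    # matches tensor<1x8x4x4xf32> above
x_nhwc = x_nchw.transpose(0, 2, 3, 1)  # inserted transpose on the activation
assert x_nhwc.shape == (1, 4, 4, 8)
assert x_nhwc.transpose(0, 3, 1, 2).shape == x_nchw.shape  # output transposed back
```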
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-defer-activation-transpose \
// RUN:   -split-input-file -verify-diagnostics | FileCheck %s

// Tests that an `add(transpose(arg0), arg1)` pattern is converted to
// `transpose(add(arg0, transpose(arg1)))`. The transpose in the activation is
// deferred to the output of `stablehlo.add` and an extra transpose op is
// inserted to the RHS to match the shape of the operand.
// CHECK-LABEL: add_with_activation_transpose
Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 14.6K bytes
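A quick NumPy check (an illustration, not the pass implementation) that the rewrite is value-preserving for any permutation `p` and conforming shapes:

```python
import numpy as np

p = (0, 2, 3, 1)                   # example permutation (e.g. NCHW -> NHWC)
inv_p = tuple(np.argsort(p))       # inverse permutation
arg0 = np.random.rand(1, 3, 4, 5)
arg1 = np.random.rand(1, 4, 5, 3)  # RHS already in the transposed layout

lhs = arg0.transpose(p) + arg1                     # add(transpose(arg0), arg1)
rhs = (arg0 + arg1.transpose(inv_p)).transpose(p)  # transpose(add(arg0, transpose(arg1)))
assert np.allclose(lhs, rhs)
```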
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
  }];
  let dependentDialects = ["mlir::stablehlo::StablehloDialect"];
}

def DeferActivationTransposePass : Pass<"stablehlo-defer-activation-transpose",
    "mlir::func::FuncOp"> {
  let summary = "Merges stablehlo.transpose for activations.";
  let description = [{
    Defers activation transposes (e.g. LHS of `stablehlo.add`) to the output
    and optionally inserts `stablehlo.transpose`s to match the shape of
    operands.
Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes
tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
def TFR_TFRQuantActRangeOp : TFR_Op<"quant_act_range", [Pure]> {
  let description = [{
    The `quant_act_range` returns a pair of integers to indicate the fixed
    range for the fused activation `act` with the quantization defined by the
    `scale` and `zero point`. Currently, the allowed activations are `NONE`,
    `RELU`, `RELU6` and `RELU_N1_TO_1`.

    Example:

    ```mlir
Last Modified: Mon Apr 22 10:54:29 UTC 2024 - 17.4K bytes
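How such a fixed range is typically derived is not shown in the excerpt; the sketch below mirrors TFLite's activation-range logic for an int8 tensor and is an assumption about this op's behavior, not its actual implementation:

```python
def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
    """Hedged sketch: fixed quantized [min, max] for a fused activation."""
    def quantize(x):  # real domain -> quantized domain
        return int(round(x / scale)) + zero_point
    if act == "NONE":
        return qmin, qmax
    if act == "RELU":
        return max(qmin, quantize(0.0)), qmax
    if act == "RELU6":
        return max(qmin, quantize(0.0)), min(qmax, quantize(6.0))
    if act == "RELU_N1_TO_1":
        return max(qmin, quantize(-1.0)), min(qmax, quantize(1.0))
    raise ValueError(f"unsupported activation: {act}")

print(quant_act_range("RELU6", scale=0.05, zero_point=-128))  # (-128, -8)
```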
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
// Ternary ops patterns.
//===----------------------------------------------------------------------===//

// Multi-pattern consisting of matching stand-alone convolution op followed by
// activation op.
multiclass FuseActFnIntoConvOpPat<Op ActFnOp, ConstantStrAttr ActFnAttr> {
  def FuseActivationFuncWithConv#ActFnOp#ActFnAttr : Pat<
    (ActFnOp (TFL_Conv2DOp:$conv_out $input, $filter, $bias, $h_factor,
Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes
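Conceptually the multi-pattern rewrites `act(conv(...))` into a single conv whose `fused_activation_function` attribute records the activation. A toy Python rendering of that idea (hypothetical helper, not the TableGen semantics):

```python
def fuse_act_into_conv(act_kind, conv_op, conv_result_uses):
    # Fuse only if the conv has no activation yet and feeds the act op alone.
    if conv_op["fused_activation_function"] == "NONE" and conv_result_uses == 1:
        conv_op["fused_activation_function"] = act_kind  # e.g. "RELU6"
        return conv_op  # the stand-alone activation op is erased
    return None         # pattern does not apply

conv = {"op": "tfl.conv_2d", "fused_activation_function": "NONE"}
print(fuse_act_into_conv("RELU6", conv, conv_result_uses=1))
```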
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// dynamic batch dimension is properly quantized.
// Note that this checks for the identical condition as
// quantize_conv_with_bias_dynamic_fn, omitting stablehlo.maximum.
// This is because activation clipping which includes 0.0f can be simply
// omitted from the graph, as the lifted function's out_scale and out_zp are
// already calculated based on the clipped distribution.
Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes
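A hedged numeric sketch of why the clamp is redundant: if `out_scale`/`out_zp` were calibrated on the already-clipped range (assumed here to be [0.0, 6.0]), int8 quantization saturates exactly where a `stablehlo.maximum` with 0.0f would clip:

```python
import numpy as np

out_scale, out_zp = 6.0 / 255.0, -128    # assumed, derived from range [0, 6]
x = np.array([-1.0, 0.0, 3.0, 6.0, 7.0])
q = np.clip(np.round(x / out_scale) + out_zp, -128, 127)
print(q)  # [-128. -128.    0.  127.  127.] -> negatives already land on qmin
```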
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
  QuantizationSpecs specs = 4;

  // Configures the quantization debugger.
  DebuggerConfig debugger_config = 5;

  // Defines calibration options for quantization. This option is only used for
  // activation of static range quantization (SRQ). Quantization calibration
  // method is set to MIN_MAX by default.
  CalibrationOptions calibration_options = 6;

  // Path to file to save the quantization report, which is essentially a
Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// CHECK: %[[QCONST_0:.+]] = "tfl.pseudo_qconst"()
// CHECK: "tfl.batch_matmul"(%[[ARG]], %[[QCONST_0]]) <{adj_x = false, adj_y = false}>

// -----

// Tests static range quantized dot_general with activation as RHS
Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes
RELEASE.md
* Add `UnifiedGRU` as the new GRU implementation for tf2.0. Change the default recurrent activation function for GRU from `hard_sigmoid` to `sigmoid`, and `reset_after` to True in 2.0. Historically, the recurrent activation was `hard_sigmoid` since it is faster than `sigmoid`. With the new unified backend between CPU and GPU mode, since the CuDNN kernel is using sigmoid, we change
Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 730.3K bytes
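For reference, the two recurrent activations being compared, following the Keras definitions (a sketch, not the kernel code):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def hard_sigmoid(x):
    # Piecewise-linear approximation of sigmoid; cheaper on CPU.
    return np.clip(0.2 * x + 0.5, 0.0, 1.0)

x = np.linspace(-3.0, 3.0, 7)
print(np.abs(sigmoid(x) - hard_sigmoid(x)).max())  # small, but nonzero
```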
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Hardswish activation function.";
  let description = [{
    Computes hard-swish activation function f(x) -> (x * relu6(x+3))/6 element-wise.
  }];

  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$input);
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes
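The formula in the description transcribes directly to NumPy (an illustration, not the TFLite kernel):

```python
import numpy as np

def hard_swish(x):
    # f(x) = x * relu6(x + 3) / 6, where relu6(y) = min(max(y, 0), 6)
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0

print(hard_swish(np.array([-4.0, -1.0, 0.0, 1.0, 4.0])))
# -> [-0., -0.3333..., 0., 0.6667..., 4.]
```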