Results 51 - 60 of 335 for relu (0.08 sec)
tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir
%2 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%3 = "tfl.conv_2d"(%0, %1, %2) {
  dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
  fused_activation_function = "RELU", padding = "VALID",
  stride_h = 1 : i32, stride_w = 1 : i32
} : (tensor<?x5x5x2xf32>, tensor<3x5x5x2xf32>, tensor<3xf32>) -> tensor<?x1x1x3xf32>
%4 = "quantfork.stats"(%3) {
Last Modified: Thu May 02 09:41:17 UTC 2024 - 15.1K bytes
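The snippet above relies on TFLite's fused activations: the RELU is folded into the conv op via the fused_activation_function attribute rather than emitted as a separate op. A minimal NumPy sketch of the equivalence (the 1x1-conv-as-matmul simplification and all names are illustrative, not from the test):

```python
import numpy as np

def conv2d_fused_relu(x, w, b):
    """Illustrative 1x1 'convolution' with a fused RELU.

    Stands in for tfl.conv_2d with fused_activation_function = "RELU":
    the activation is applied to the conv + bias result within one op.
    """
    y = x @ w + b            # conv + bias (a 1x1 conv reduces to a matmul)
    return np.maximum(y, 0)  # fused RELU: x -> max(0, x)

x = np.random.randn(4, 2)
w = np.random.randn(2, 3)
b = np.zeros(3)

# The fused form equals the unfused conv -> relu sequence.
assert np.allclose(conv2d_fused_relu(x, w, b), np.maximum(x @ w + b, 0))
```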
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-merge-fusion-with-dequantize -split-input-file -verify-diagnostics | FileCheck %s

// Merge fusion with dequantize for relu case.
module attributes {tf_saved_model.semantics} {
  // CHECK-LABEL: func.func private @merge_relu_fusion
  func.func private @merge_relu_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
Last Modified: Thu Apr 04 23:45:53 UTC 2024 - 14K bytes
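The relu case merges cleanly because, for a positive scale, relu commutes with dequantization: relu(scale * (q - zp)) = scale * (max(q, zp) - zp), so the clamp can be applied to the quantized values before the dequantize. A hedged NumPy check of that identity (the values are made up):

```python
import numpy as np

scale, zero_point = 0.05, 12          # any positive scale works
q = np.arange(0, 64, dtype=np.int32)  # quantized values

dequant = scale * (q - zero_point)

# relu applied after dequantize ...
relu_then = np.maximum(dequant, 0.0)
# ... equals clamping at the zero point before dequantizing.
merged = scale * (np.maximum(q, zero_point) - zero_point)

assert np.allclose(relu_then, merged)
```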
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir
data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 9.99999974E-5 : f32,
explicit_paddings = [], filter_format = "HWIO", fused_ops = ["BiasAdd", "Relu"],
leakyrelu_alpha = 2.000000e-01 : f32, num_args = 2 : i64,
operandSegmentSizes = array<i32: 1, 1, 2, 2>, padding = "SAME",
strides = [1, 1, 1, 1], use_cudnn_on_gpu = true
Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 15.8K bytes
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
    padding: str = 'SAME',
    has_func_alias: bool = False,
) -> module.Module:
  class ConvModel(module.Module):
    """A simple model with a single conv2d, bias and relu."""

    def __init__(self):
      self.out_channel_size = filter_shape[-1]
      # This ensures filters will have different value range per out channel
      self.filters = np.stack(
          [
Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes
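The "different value range per out channel" comment matters for per-channel quantization, where each output channel gets its own scale. A sketch of one way such filters could be built (the uniform ranges and shape are assumptions for illustration, not the test's actual code):

```python
import numpy as np

filter_shape = (2, 3, 3, 2)  # (h, w, in_channels, out_channels), illustrative
out_channel_size = filter_shape[-1]

# Give each output channel a different magnitude so per-channel
# quantization produces distinct scales.
filters = np.stack(
    [
        np.random.uniform(low=-(i + 1), high=(i + 1), size=filter_shape[:-1])
        for i in range(out_channel_size)
    ],
    axis=-1,
)
assert filters.shape == filter_shape
```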
tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
range for the fused activation `act` with the quantization defined by the
`scale` and `zero point`. Currently, the allowed activations are
`NONE`, `RELU`, `RELU6` and `RELU_N1_TO_1`.

Example:

```mlir
%3, %4 = tfr.quant_act_range(%2, %1, %0) : (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
```
}];

let arguments = (ins
Last Modified: Mon Apr 22 10:54:29 UTC 2024 - 17.4K bytes
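In effect, tfr.quant_act_range yields the clamp bounds for a fused activation in the quantized domain. A hedged Python sketch under standard affine-quantization assumptions (q = round(x / scale) + zero_point, int8 bounds; an illustration, not the TFR kernel):

```python
def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
    """Quantized clamp range for a fused activation (sketch, not the TFR op)."""
    def quantize(x):
        return max(qmin, min(qmax, round(x / scale) + zero_point))

    if act == "NONE":
        return qmin, qmax
    if act == "RELU":          # x -> max(0, x): lower bound is q(0) = zero_point
        return quantize(0.0), qmax
    if act == "RELU6":         # clamp to [0, 6]
        return quantize(0.0), quantize(6.0)
    if act == "RELU_N1_TO_1":  # clamp to [-1, 1]
        return quantize(-1.0), quantize(1.0)
    raise ValueError(f"unsupported activation: {act}")

print(quant_act_range("RELU6", scale=0.1, zero_point=-10))  # (-10, 50)
```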
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
                      (BinBroadcastDimensions $one, $features))))>;

//===----------------------------------------------------------------------===//
// Relu op patterns.
//===----------------------------------------------------------------------===//

// TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
// require HLO canonicalization of min and max on a tensor to ClampOp.
Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes
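The TODO hints at the lowering: relu6(x) = min(max(x, 0), 6), i.e. a clamp with scalar bounds, which is what HLO's ClampOp expresses. A one-line NumPy rendering of the identity:

```python
import numpy as np

def relu6(x):
    # relu6 as a clamp: min(max(x, 0), 6), matching a lowering to a ClampOp.
    return np.clip(x, 0.0, 6.0)

x = np.array([-3.0, 0.5, 7.2])
assert np.allclose(relu6(x), np.minimum(np.maximum(x, 0.0), 6.0))
```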
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
  let hasFolder = 1;
}

def TFL_ReluOp: TFL_Op<"relu", [
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    Pure,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Relu operator";

  let description = [{
    Element-wise Relu operator
      x -> max(0, x)
  }];
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes
tensorflow/compiler/mlir/lite/tests/ops.mlir
// CHECK: "RELU"
%1 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU"} : tensor<4xi32>
// CHECK: "RELU_N1_TO_1"
%2 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU_N1_TO_1"} : tensor<4xi32>
// CHECK: "RELU6"
%3 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6"} : tensor<4xi32>
// CHECK: "TANH"
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes
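The CHECK lines enumerate TFLite's fused activation variants on tfl.add. A small Python reference for how those variants behave elementwise (a sketch of the semantics, not TFLite's kernel):

```python
import numpy as np

def apply_fused_activation(x, act):
    """Reference semantics for TFLite fused activation attributes (sketch)."""
    if act == "NONE":
        return x
    if act == "RELU":
        return np.maximum(x, 0)
    if act == "RELU_N1_TO_1":
        return np.clip(x, -1, 1)
    if act == "RELU6":
        return np.clip(x, 0, 6)
    if act == "TANH":
        return np.tanh(x)
    raise ValueError(f"unsupported activation: {act}")

# e.g. tfl.add with fused_activation_function = "RELU6":
a, b = np.array([1, 5]), np.array([2, 4])
print(apply_fused_activation(a + b, "RELU6"))  # [3 6]
```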