Results 1 - 10 of 18 for Selu (0.03 sec)
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
      (CreateTFShapeOp $input, $input, ConstBoolAttrTrue))),
  [(TensorOf<[TF_Int, TF_Float, TF_Complex]> $updates)]>;

//===----------------------------------------------------------------------===//
// Selu op patterns.
//===----------------------------------------------------------------------===//

def getScale : NativeCodeCall<
  "GetScalarOfType(getElementTypeOrSelf($0), 1.0507009873554804934193349852946)">;
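For reference, the scale constant in getScale is the SELU scale from Klambauer et al. (2017). A minimal NumPy sketch of the activation this lowering targets, with the companion alpha constant filled in from the standard definition (alpha is not shown in this preview, so treat it as an assumption):

    import numpy as np

    # scale matches the getScale NativeCodeCall above; alpha is the standard
    # SELU constant (assumed here, not visible in the snippet).
    SCALE = 1.0507009873554805
    ALPHA = 1.6732632423543772

    def selu(x):
        # scale * x for x > 0, scale * alpha * (exp(x) - 1) otherwise
        return SCALE * np.where(x > 0, x, ALPHA * (np.exp(x) - 1.0))

    print(selu(np.array([-1.0, 0.0, 2.0])))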
Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir
  } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
  %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
  %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
  %6 = "tf.Conv2D"(%arg0, %arg1) {
    data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
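The lines above show the Conv2D -> BiasAdd -> Relu chain that the lifting pass matches. A rough eager-mode Python equivalent, with shapes taken from the test (the padding mode of the first conv is cut off in the preview, so "SAME" is an assumption):

    import tensorflow as tf

    x = tf.random.normal([1, 3, 4, 3])   # NHWC input, as in the test
    w = tf.random.normal([2, 3, 3, 2])   # HWIO filter
    b = tf.random.normal([2])            # per-output-channel bias

    y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")  # padding assumed
    y = tf.nn.bias_add(y, b)             # NHWC bias add
    y = tf.nn.relu(y)
    print(y.shape)                       # (1, 3, 4, 2)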
Last Modified: Fri May 10 04:07:09 UTC 2024 - 26.5K bytes
tensorflow/compiler/mlir/lite/schema/schema.fbs
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  L2_POOL_2D = 12,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  LOGISTIC = 14,
  LSH_PROJECTION = 15,
  LSTM = 16,
  MAX_POOL_2D = 17,
  MUL = 18,
  RELU = 19,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
  RELU_N1_TO_1 = 20,
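As the NOTE explains, RELU_N1_TO_1 is the renamed RELU1. Semantically it clamps activations to [-1, 1], versus the plain max(x, 0) of RELU; a small NumPy sketch of the two:

    import numpy as np

    def relu(x):
        return np.maximum(x, 0.0)

    def relu_n1_to_1(x):
        # clamp to the [-1, 1] range
        return np.clip(x, -1.0, 1.0)

    x = np.array([-2.0, -0.5, 0.5, 2.0])
    print(relu(x))          # [0.  0.  0.5 2. ]
    print(relu_n1_to_1(x))  # [-1.  -0.5  0.5  1. ]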
Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
  dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
  fused_activation_function = "RELU", padding = "SAME",
  stride_h = 1 : i32, stride_w = 1 : i32
} : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
%conv2 = "tfl.conv_2d"(%0, %w, %b2) {
  dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
  fused_activation_function = "RELU", padding = "SAME",
  stride_h = 1 : i32, stride_w = 1 : i32
Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
  // Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
  if (IsGpuDevice(conv)) {
    auto activation = GetActivation(bias_add);
    if (!activation || activation->getName().stripDialect() != "Relu" ||
        !bias_add.getOutput().hasOneUse()) {
      (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
        diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
      });
      return false;
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  L2_POOL_2D = 12,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  LOGISTIC = 14,
  LSH_PROJECTION = 15,
  LSTM = 16,
  MAX_POOL_2D = 17,
  MUL = 18,
  RELU = 19,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
  RELU_N1_TO_1 = 20,
Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes
tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir
  return %2 : tensor<*xf32>
}
// CHECK-LABEL: gelu_aten
// CHECK: %0 = "tfl.gelu"(%arg0) <{approximate = false}> : (tensor<5x10xf32>) -> tensor<5x10xf32>
func.func private @gelu_decomp_2(%arg0: tensor<5x10xf32>) -> tensor<5x10xf32>
func.func @gelu_aten_approximate(%arg0: tensor<5x10xf32>) -> (tensor<*xf32>) {
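The approximate flag on tfl.gelu distinguishes the exact erf-based GELU from the tanh approximation. A sketch of the two standard formulas (generic definitions, not taken from this test file):

    import numpy as np
    from math import erf, sqrt, pi

    def gelu_exact(x):
        # 0.5 * x * (1 + erf(x / sqrt(2)))
        return np.array([0.5 * v * (1.0 + erf(v / sqrt(2.0))) for v in x])

    def gelu_tanh_approx(x):
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * x * (1.0 + np.tanh(sqrt(2.0 / pi) * (x + 0.044715 * x ** 3)))

    x = np.linspace(-2.0, 2.0, 5)
    print(gelu_exact(x))
    print(gelu_tanh_approx(x))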
Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 32.6K bytes
tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir
  %14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32>
  %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32>
  %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32>
  %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32>
  %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
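The loop body above multiplies a logistic gate by a relu-activated value, adds the product to another operand, and re-applies relu. A NumPy sketch of that dataflow (the variable names are illustrative, not taken from the test):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def body_step(acc, gate_in, cand_in):
        # %16 = logistic(gate_in) * relu(cand_in); %18 = relu(acc + %16)
        gated = sigmoid(gate_in) * np.maximum(cand_in, 0.0)
        return np.maximum(acc + gated, 0.0)

    acc = np.zeros((4, 2), dtype=np.float32)
    print(body_step(acc, np.ones((4, 2)), np.ones((4, 2))).shape)  # (4, 2)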
Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.5K bytes
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
      (MHLO_ConstantOp:$one (GetScalarOfType<1> $features)),
      (BinBroadcastDimensions $one, $features))))>;

//===----------------------------------------------------------------------===//
// Relu op patterns.
//===----------------------------------------------------------------------===//

// TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
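The Relu patterns referenced here lower tf.Relu to an elementwise maximum with a zero constant; the TODO also mentions Relu6, which additionally clamps from above at 6. A NumPy sketch of the two standard definitions:

    import numpy as np

    def relu(x):
        # max(x, 0), the target of the Relu lowering pattern
        return np.maximum(x, 0.0)

    def relu6(x):
        # min(max(x, 0), 6), the activation named in the Relu6 TODO
        return np.minimum(np.maximum(x, 0.0), 6.0)

    x = np.array([-3.0, 2.0, 7.0])
    print(relu(x), relu6(x))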
Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes
tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir
  %broad2_s = "quantfork.stats"(%broad2) {layerStats = dense<[0.000000e+00, 1.000000e+01]> : tensor<2xf32>} : (tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
  %add = "tfl.add"(%broad1_s, %broad2_s) {fused_activation_function = "RELU"} : (tensor<?x26x26x26x16xf32>, tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
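The layerStats range of [0.0, 10.0] above is the kind of min/max statistic a prepare-quantize pass turns into quantization parameters. A generic sketch of that mapping for signed 8-bit asymmetric quantization (illustrative only, not the exact logic of this pass):

    def quant_params(rmin, rmax, qmin=-128, qmax=127):
        # map a real [rmin, rmax] range onto the integer [qmin, qmax] range
        scale = (rmax - rmin) / (qmax - qmin)
        zero_point = round(qmin - rmin / scale)
        return scale, zero_point

    print(quant_params(0.0, 10.0))  # (~0.0392, -128)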
Last Modified: Thu May 02 09:41:17 UTC 2024 - 38.2K bytes