Results 11 - 20 of 31 for RELU
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
// Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
if (IsGpuDevice(conv)) {
  auto activation = GetActivation(bias_add);
  if (!activation || activation->getName().stripDialect() != "Relu" ||
      !bias_add.getOutput().hasOneUse()) {
    (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
      diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
    });
    return false;
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes
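For context, the matcher above only fuses on GPU when the graph is exactly Conv2D followed by BiasAdd followed by Relu, with the BiasAdd result used once. A minimal TensorFlow sketch of that unfused pattern (shapes are illustrative assumptions, not taken from the source):

    import tensorflow as tf

    x = tf.random.normal([1, 8, 8, 3])    # NHWC input (hypothetical shape)
    w = tf.random.normal([3, 3, 3, 16])   # HWIO filter (hypothetical shape)
    b = tf.random.normal([16])            # per-channel bias

    # The unfused chain the matcher looks for: Conv2D -> BiasAdd -> Relu.
    y = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w, strides=1, padding="SAME"), b))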
tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir
%14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32> %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32> %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32> %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32> %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.5K bytes
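The five ops above form a short dataflow on 4x2 tensors. A numpy sketch of the same computation, with hypothetical random inputs standing in for %10#0, %10#1, and %13:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    a = np.random.randn(4, 2).astype(np.float32)  # stands in for %10#0
    b = np.random.randn(4, 2).astype(np.float32)  # stands in for %10#1
    c = np.random.randn(4, 2).astype(np.float32)  # stands in for %13

    v14 = np.maximum(b, 0.0)    # tfl.relu
    v15 = sigmoid(a)            # tfl.logistic
    v16 = v15 * v14             # tfl.mul, no fused activation
    v17 = c + v16               # tfl.add, no fused activation
    v18 = np.maximum(v17, 0.0)  # tfl.relu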
tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir
%broad2_s = "quantfork.stats"(%broad2) {layerStats = dense<[0.000000e+00, 1.000000e+01]> : tensor<2xf32>} : (tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
%add = "tfl.add"(%broad1_s, %broad2_s) {fused_activation_function = "RELU"} : (tensor<?x26x26x26x16xf32>, tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 38.2K bytes
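Here fused_activation_function = "RELU" means the add and the clamp execute as a single op, while the quantfork.stats op records the observed output range ([0, 10] here) for calibration. Numerically the fused op is just an add followed by a clamp at zero; a numpy sketch with hypothetical inputs:

    import numpy as np

    a = np.random.randn(2, 3).astype(np.float32)
    b = np.random.randn(2, 3).astype(np.float32)

    # tfl.add with fused_activation_function = "RELU" is equivalent
    # to an add followed by max(., 0), executed as one op.
    fused = np.maximum(a + b, 0.0)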
tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td
(TFL_RangeOp $start, $limit, $delta)>;
def LegalizeRelu6 : Pat<(TF_Relu6Op $arg), (TFL_Relu6Op $arg)>;
def LegalizeRelu : Pat<(TF_ReluOp $arg), (TFL_ReluOp $arg)>;
// TFL Relu doesn't support I32/I64 type, so legalizes TF Relu to TFL Maximum.
def LegalizeReluI32 : Pat<(TF_ReluOp TensorOf<[I32]>:$arg),
    (TFL_MaximumOp $arg, (Arith_ConstantOp ConstantAttr<RankedI32ElementsAttr<[]>, "0">))>;
Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 28.5K bytes
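The LegalizeReluI32 pattern leans on a simple identity: relu(x) = max(x, 0), which holds for integer tensors just as well as for floats, so TFL's missing integer Relu can be replaced by Maximum against a zero constant. A quick numpy check:

    import numpy as np

    x = np.array([-3, -1, 0, 2, 5], dtype=np.int32)

    # TFL has no integer Relu, so TF Relu on i32 is rewritten to
    # Maximum(x, 0); the two are elementwise identical.
    assert np.array_equal(np.maximum(x, 0), np.where(x > 0, x, 0))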
tensorflow/compiler/mlir/tfrt/tests/mlrt/tpu_conversions.mlir
// CHECK-NEXT: tf_mlrt.executeop.device{{.*}}op: \22Relu\22
// CHECK-NEXT: tf_mlrt_tpu.compile_and_execute
%0 = "tf.Cast"(%arg0) {__op_key = 0: i32, device = "/device:CPU:0"} : (tensor<i32>) -> tensor<f32>
%1 = "tf.Relu"(%0) {__op_key = 1: i32, device = "/device:CPU:0"} : (tensor<f32>) -> (tensor<f32>)
Last Modified: Wed Oct 04 21:25:31 UTC 2023 - 11K bytes
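The test pins the Cast and Relu ops to the CPU ahead of the TPU compile_and_execute. In the TensorFlow Python API the analogous placement would look like this (a sketch of the placement idea, not the test's actual source):

    import tensorflow as tf

    x = tf.constant([1, -2, 3], dtype=tf.int32)

    # Pin the cheap pre-processing ops to the CPU, mirroring the
    # device = "/device:CPU:0" attributes in the test above.
    with tf.device("/device:CPU:0"):
        y = tf.nn.relu(tf.cast(x, tf.float32))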
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td
(NamedAttr<"equation"> $equation))),
[(IsNotInLiftedFunc $res), (IsEinsumSupportedByXlaDotV2 $equation)], [], (addBenefit 10)>;
}
defm : LiftCompositeOpsWithActivation<TF_ReluOp, "relu">;
defm : LiftCompositeOpsWithActivation<TF_Relu6Op, "relu6">;
def LiftGather : Pat<
    (TF_GatherV2Op:$res $params, $indices, $axis, $batch_dims),
    (LiftAsTFPartitionedCall<"composite_gather_fn">
Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 15.6K bytes
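The two defm lines instantiate the lifting patterns once per activation, relu and relu6. As a reminder of the difference between the two activations (numpy sketch):

    import numpy as np

    x = np.linspace(-2.0, 8.0, 6).astype(np.float32)

    relu = np.maximum(x, 0.0)                     # unbounded above
    relu6 = np.minimum(np.maximum(x, 0.0), 6.0)   # clamped to [0, 6]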
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs
HASHTABLE_LOOKUP = 10,
L2_NORMALIZATION = 11,
L2_POOL_2D = 12,
LOCAL_RESPONSE_NORMALIZATION = 13,
LOGISTIC = 14,
LSH_PROJECTION = 15,
LSTM = 16,
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
// NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
// since different model developers use RELU1 in different ways. Never
// create another op called RELU1.
RELU_N1_TO_1 = 20,
Last Modified: Mon Apr 19 19:46:06 UTC 2021 - 26.1K bytes
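The renamed RELU_N1_TO_1 clamps to [-1, 1], unlike plain RELU which only clamps below at 0. A numpy sketch of the two ops:

    import numpy as np

    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0], dtype=np.float32)

    relu = np.maximum(x, 0.0)              # RELU = 19
    relu_n1_to_1 = np.clip(x, -1.0, 1.0)   # RELU_N1_TO_1 = 20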
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert-tf-quant-types.mlir
// CHECK-LABEL: func @relu_qint8
func.func @relu_qint8(%arg0: tensor<1x!tf_type.qint8>) -> tensor<1x!tf_type.qint8> {
  // CHECK: %[[X:.*]] = "tf.Relu"(%arg0) : (tensor<1xi8>) -> tensor<1xi8>
  %0 = "tf.Relu"(%arg0) : (tensor<1x!tf_type.qint8>) -> tensor<1x!tf_type.qint8>
  func.return %0 : tensor<1x!tf_type.qint8>
}
// -----
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 25.9K bytes
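This test only rewrites the storage type, qint8 to i8, and keeps the tf.Relu. As a side note on the math itself: for affinely quantized values, a correct relu clamps at the zero point rather than at integer 0. A numpy sketch under that assumption (the zero point here is hypothetical, not from the test):

    import numpy as np

    q = np.array([-128, -5, 0, 7, 127], dtype=np.int8)  # quantized values
    zp = np.int8(-5)  # hypothetical zero point

    # With real = scale * (q - zp), relu in the quantized domain
    # clamps at the zero point, not at integer 0.
    relu_q = np.maximum(q, zp)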
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
%0 = stablehlo.constant dense<2.000000e+00> : tensor<4x2x3x3xf32>  // weight
%1 = stablehlo.constant dense<3.000000e+00> : tensor<4xf32>  // bias
%2 = stablehlo.constant dense<0.000000e+00> : tensor<1x4x5x5xf32>  // relu
%3 = stablehlo.broadcast_in_dim %1, dims = [1] : (tensor<4xf32>) -> tensor<1x4x5x5xf32>
Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes
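The dense<0.0> constant commented // relu and the broadcast_in_dim along dims = [1] suggest a conv epilogue on an NCHW tensor: add the bias per channel, then clamp via an elementwise maximum against zeros. A numpy sketch of that epilogue (the conv output is a random stand-in):

    import numpy as np

    conv_out = np.random.randn(1, 4, 5, 5).astype(np.float32)  # NCHW, stand-in
    bias = np.full((4,), 3.0, dtype=np.float32)

    # broadcast_in_dim with dims = [1]: bias runs along the channel axis.
    biased = conv_out + bias.reshape(1, 4, 1, 1)
    out = np.maximum(biased, 0.0)  # relu as max with the zero constant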
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
HASHTABLE_LOOKUP = 10,
L2_NORMALIZATION = 11,
L2_POOL_2D = 12,
LOCAL_RESPONSE_NORMALIZATION = 13,
LOGISTIC = 14,
LSH_PROJECTION = 15,
LSTM = 16,
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
// NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
// since different model developers use RELU1 in different ways. Never
// create another op called RELU1.
RELU_N1_TO_1 = 20,
Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes