Results 51 - 60 of 401 for relu (0.07 sec)
- tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td
         (TFR_ConstantTensorOp (Arith_ConstantOp ConstantAttr<I32Attr, "127">))]>;

    def QuantActRangeReluPattern : Pattern<
      (TFR_TFRQuantActRangeOp
        (TFR_ConstOp HasStringAttr<"RELU">:$act),
        (ConstantLikeMatcher F32Attr:$scale),
        (ConstantLikeMatcher I64Attr:$zp)),
      [(TFR_ConstantTensorOp (Arith_ConstantOp (Quantize<"0.0f"> $scale, $zp))),
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 2.4K bytes - Viewed (0)
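The pattern above says that for a fused `RELU` activation the valid quantized range runs from the quantized value of 0.0 up to the int8 maximum 127. A minimal Python sketch of that computation (the helper names are illustrative, not part of the TFR sources):

```python
def quantize(x: float, scale: float, zp: int) -> int:
    """Affine quantization: q = round(x / scale) + zp, clamped to int8."""
    return max(-128, min(127, round(x / scale) + zp))

def relu_act_range(scale: float, zp: int) -> tuple[int, int]:
    # Mirrors QuantActRangeReluPattern: min = Quantize(0.0f), max = 127.
    return quantize(0.0, scale, zp), 127

print(relu_act_range(0.5, -10))  # (-10, 127): values below the zero point are clipped
```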
- tensorflow/c/experimental/ops/update_cpp_ops.sh
      MatMul \
      Neg \
      Sum \
      Sub \
      Div \
      DivNoNan \
      Exp \
      Sqrt \
      SqrtGrad \
      Log1p

    ${generate} \
      --category=nn \
      SparseSoftmaxCrossEntropyWithLogits \
      ReluGrad \
      Relu \
      BiasAdd \
      BiasAddGrad

    ${generate} \
      --category=resource_variable \
      VarHandleOp \
      ReadVariableOp \
      AssignVariableOp \
      DestroyResourceOp

    ${generate} \
      --category=io \
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 17 17:54:34 UTC 2022 - 1.6K bytes - Viewed (0)
- tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
    # Check activation functions are explicitly present.
    # If present the last op before return should be stablehlo.clamp for relu6
    # and stablehlo.maximum for relu.
    if activation_fn is nn_ops.relu6:
      self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
    elif activation_fn is nn_ops.relu:
      self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
    else:
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0)
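The test's expectation follows from the standard lowering: `relu(x) = max(x, 0)` becomes a `stablehlo.maximum`, while `relu6(x) = min(max(x, 0), 6)` is a clamp to `[0, 6]` and becomes a `stablehlo.clamp`. An illustrative NumPy equivalent of the two activations:

```python
import numpy as np

x = np.array([-3.0, -0.5, 2.0, 8.0], dtype=np.float32)

relu = np.maximum(x, 0.0)     # what stablehlo.maximum computes
relu6 = np.clip(x, 0.0, 6.0)  # what stablehlo.clamp computes

print(relu)   # [0. 0. 2. 8.]
print(relu6)  # [0. 0. 2. 6.]
```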
- tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py
    def mnist_model():
      """Creates a MNIST model."""
      model = tf.keras.models.Sequential()
      model.add(tf.keras.layers.Flatten())
      model.add(tf.keras.layers.Dense(128, activation='relu'))
      model.add(tf.keras.layers.Dense(10, activation='softmax'))
      return model

    class TestModule(tf.Module):

      def __init__(self):
        super(TestModule, self).__init__()
        self.model = mnist_model()
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.7K bytes - Viewed (0)
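For context, a quick way to exercise the model defined above (illustrative only, not part of keras.py): `Flatten` turns a 28x28 input into 784 features, and the final `Dense(10, activation='softmax')` yields ten class probabilities.

```python
import tensorflow as tf

model = mnist_model()                 # the Sequential model defined above
probs = model(tf.zeros([1, 28, 28]))  # one dummy MNIST-sized image
print(probs.shape)                    # (1, 10)
```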
- tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
    %cst = "arith.constant"() {value = dense<[[[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>} : () -> tensor<1x1x3xf32>
    %prelu = "tfl.prelu"(%arg0, %cst) : (tensor<1x10x10x3xf32>, tensor<1x1x3xf32>) -> tensor<1x10x10x3xf32>
    func.return %prelu : tensor<1x10x10x3xf32>

    // CHECK: %[[cst:.*]] = arith.constant dense<[{{\[}}[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes - Viewed (0)
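`tfl.prelu` is elementwise: positive inputs pass through, negative inputs are scaled by the per-channel `alpha` constant, which broadcasts from shape `1x1x3` over the `1x10x10x3` input. A NumPy sketch of the same computation:

```python
import numpy as np

x = np.random.randn(1, 10, 10, 3).astype(np.float32)
alpha = np.array([[[1.66394591, 3.61694336, 2.0382936]]], np.float32)  # shape (1, 1, 3)

# PReLU: identity for x >= 0, alpha * x for x < 0; alpha broadcasts
# across the batch and spatial dimensions onto the channel axis.
prelu = np.where(x >= 0, x, alpha * x)
print(prelu.shape)  # (1, 10, 10, 3)
```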
- tensorflow/compiler/mlir/tfr/tests/decompose.mlir
    %relu_attr = tfr.constant "RELU" -> !tfr.attr
    %relu6_attr = tfr.constant "RELU6" -> !tfr.attr
    %reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
    %none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
    %relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 16.7K bytes - Viewed (0)
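This test feeds each fused-activation attribute through `tfr.quant_act_range`. A hedged sketch of the float clamp window each name implies; only the `RELU` case is spelled out in the pattern shown earlier, the rest are inferred from the activation names:

```python
import math

def quantize(x: float, scale: float, zp: int) -> int:
    return max(-128, min(127, round(x / scale) + zp))

# Assumed float clamp windows per fused-activation name (inference, not from the source).
ACT_RANGES = {
    "NONE": (-math.inf, math.inf),
    "RELU": (0.0, math.inf),
    "RELU6": (0.0, 6.0),
    "RELU_N1_TO_1": (-1.0, 1.0),
}

def quant_act_range(act: str, scale: float, zp: int) -> tuple[int, int]:
    lo, hi = ACT_RANGES[act]
    qlo = -128 if math.isinf(lo) else quantize(lo, scale, zp)
    qhi = 127 if math.isinf(hi) else quantize(hi, scale, zp)
    return qlo, qhi

print(quant_act_range("RELU6", 0.5, -10))  # (-10, 2)
```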
- tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/insert_fallback_tensor_copy.mlir
    // CHECK-NOT: tfrt_fallback_async.copy_if_small
    %0 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
    %1 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
    tfrt.return %0, %1 : !tfrt_fallback.tf_tensor, !tfrt_fallback.tf_tensor
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 25 10:51:48 UTC 2022 - 5.5K bytes - Viewed (0)
- tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc
    %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
    return %0 : tensor<1x4x4x3xf32>
  }
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes - Viewed (0)
- tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
    // Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
    if (IsGpuDevice(conv)) {
      auto activation = GetActivation(bias_add);
      if (!activation || activation->getName().stripDialect() != "Relu" ||
          !bias_add.getOutput().hasOneUse()) {
        (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
          diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
        });
        return false;
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0)
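The op sequence the matcher looks for is an unfused `Conv2D` followed by `BiasAdd` followed by `Relu`. A small, illustrative TensorFlow function that produces exactly that graph; the fusion itself is performed later by the compiler pass when a fused kernel is available:

```python
import tensorflow as tf

@tf.function
def conv_bias_relu(x, filters, bias):
    # Conv2D -> BiasAdd -> Relu: the only fusion currently supported on GPU.
    y = tf.nn.conv2d(x, filters, strides=1, padding="SAME")
    y = tf.nn.bias_add(y, bias)
    return tf.nn.relu(y)

x = tf.random.normal([1, 8, 8, 3])
filters = tf.random.normal([3, 3, 3, 16])
bias = tf.random.normal([16])
print(conv_bias_relu(x, filters, bias).shape)  # (1, 8, 8, 16)
```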
- tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
    // CHECK: func private @quantized_conv2d_with_relu6_fn
    // CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
    // CHECK: func private @quantized_matmul_with_bias_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
    // CHECK: func private @quantized_matmul_fn
  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 3.3K bytes - Viewed (0)