Results 41 - 50 of 375 for relu (0.23 sec)
tensorflow/compiler/jit/tests/keras_imagenet_main.golden_summary
cluster 1 size 815
 AddN 1
 Conv2D 53
 Conv2DBackpropFilter 53
 Conv2DBackpropInput 52
 Equal 1
 FusedBatchNormGradV2 53
 FusedBatchNormV2 53
 MatMul 3
 MaxPool 1
 MaxPoolGrad 1
 Mean 1
 Mul 218
 Pad 2
 ReadVariableOp 538
 Relu 49
 ReluGrad 49
 Reshape 2
 ResourceApplyKerasMomentum 161
 Slice 1
 Softmax 1
 SparseSoftmaxCrossEntropyWithLogits 1
 Squeeze 1
 Sum 1
 Tile 1
 Transpose 1
Last Modified: Fri Jan 06 10:38:14 UTC 2023 - 874 bytes
tensorflow/compiler/mlir/tfrt/tests/ir/fallback_opt.mlir
Last Modified: Fri Mar 25 11:03:04 UTC 2022 - 4.8K bytes
tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir
%0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT" %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT" %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
Last Modified: Fri May 19 19:32:06 UTC 2023 - 6.2K bytes
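The RELU6/RELU attributes in the snippet above come from TFLite's practice of folding a trailing activation into the preceding op's fused_activation_function attribute. A minimal sketch of how such IR typically arises (the fusion itself is converter behavior, assumed here, not something this API guarantees):

    import tensorflow as tf

    @tf.function(input_signature=[tf.TensorSpec([1], tf.float32),
                                  tf.TensorSpec([1], tf.float32)])
    def add_relu6(a, b):
      # add followed by relu6; the converter usually emits a single
      # tfl.add with fused_activation_function = "RELU6"
      return tf.nn.relu6(a + b)

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [add_relu6.get_concrete_function()])
    tflite_model = converter.convert()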
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
{"quantized_ops": ["${main_op}", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}"], "act_func": "internal_dequantize_no_activation_fn", "output_type": "f32"},
Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes
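Each act_func entry above selects an activation-specific requantization body. A rough NumPy sketch of what an internal_requantize_and_relu_fn-style function computes — the function name and i8 output type are from the snippet, the arithmetic is an illustrative assumption:

    import numpy as np

    def requantize_and_relu(acc_i32, acc_scale, out_scale, out_zp):
      # Rescale the int32 accumulator into the output quantization domain.
      q = np.round(acc_i32 * (acc_scale / out_scale)) + out_zp
      # ReLU in the quantized domain: clamp the lower bound at the zero
      # point (which represents real 0.0) and the upper bound at the i8 max.
      return np.clip(q, out_zp, 127).astype(np.int8)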
tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td
  (TFR_ConstantTensorOp (Arith_ConstantOp ConstantAttr<I32Attr, "127">))]>;

def QuantActRangeReluPattern : Pattern<
  (TFR_TFRQuantActRangeOp
    (TFR_ConstOp HasStringAttr<"RELU">:$act),
    (ConstantLikeMatcher F32Attr:$scale),
    (ConstantLikeMatcher I64Attr:$zp)),
  [(TFR_ConstantTensorOp (Arith_ConstantOp (Quantize<"0.0f"> $scale, $zp))),
Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 2.4K bytes
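The pattern above rewrites tfr.quant_act_range for "RELU" into the constant pair [quantize(0.0), 127]. With the standard affine quantization formula q = round(r / scale) + zp, that amounts to (a sketch of the math, not the generated code):

    def quant_act_range_relu(scale, zp):
      lower = round(0.0 / scale) + zp  # Quantize<"0.0f"> -> the zero point
      upper = 127                      # the ConstantAttr<I32Attr, "127"> i8 max
      return lower, upper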
tensorflow/c/experimental/ops/update_cpp_ops.sh
  MatMul \
  Neg \
  Sum \
  Sub \
  Div \
  DivNoNan \
  Exp \
  Sqrt \
  SqrtGrad \
  Log1p

${generate} \
  --category=nn \
  SparseSoftmaxCrossEntropyWithLogits \
  ReluGrad \
  Relu \
  BiasAdd \
  BiasAddGrad

${generate} \
  --category=resource_variable \
  VarHandleOp \
  ReadVariableOp \
  AssignVariableOp \
  DestroyResourceOp

${generate} \
  --category=io \
Last Modified: Tue May 17 17:54:34 UTC 2022 - 1.6K bytes
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
# Check that activation functions are explicitly present.
# If present, the last op before return should be stablehlo.clamp for relu6
# and stablehlo.maximum for relu.
if activation_fn is nn_ops.relu6:
  self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
elif activation_fn is nn_ops.relu:
  self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
else:
Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes
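The assertions rest on how these activations lower to StableHLO: relu is an elementwise maximum with zero, while relu6 additionally clamps at six, so it becomes a single clamp. In NumPy terms (illustrative only):

    import numpy as np

    def relu(x):
      return np.maximum(x, 0.0)     # lowers to stablehlo.maximum

    def relu6(x):
      return np.clip(x, 0.0, 6.0)   # lowers to stablehlo.clamp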
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py
def mnist_model():
  """Creates a MNIST model."""
  model = tf.keras.models.Sequential()
  model.add(tf.keras.layers.Flatten())
  model.add(tf.keras.layers.Dense(128, activation='relu'))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))
  return model

class TestModule(tf.Module):

  def __init__(self):
    super(TestModule, self).__init__()
    self.model = mnist_model()
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.7K bytes
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
%cst = "arith.constant"() {value = dense<[[[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>} : () -> tensor<1x1x3xf32> %prelu = "tfl.prelu"(%arg0, %cst) : (tensor<1x10x10x3xf32>, tensor<1x1x3xf32>) -> tensor<1x10x10x3xf32> func.return %prelu : tensor<1x10x10x3xf32> // CHECK: %[[cst:.*]] = arith.constant dense<[{{\[}}[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes
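For reference, tfl.prelu applies a learned per-channel slope to negative inputs, with the alpha tensor (tensor<1x1x3xf32> above) broadcast against the input. A NumPy sketch of the semantics:

    import numpy as np

    def prelu(x, alpha):
      # Identity for non-negative values, alpha-scaled for negative ones;
      # alpha broadcasts across the leading spatial dimensions.
      return np.maximum(x, 0.0) + alpha * np.minimum(x, 0.0)

    x = np.random.randn(1, 10, 10, 3).astype(np.float32)
    alpha = np.array([[[1.66394591, 3.61694336, 2.0382936]]], dtype=np.float32)
    y = prelu(x, alpha)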
tensorflow/compiler/mlir/tfr/tests/decompose.mlir
%relu_attr = tfr.constant "RELU" -> !tfr.attr
%relu6_attr = tfr.constant "RELU6" -> !tfr.attr
%reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
%none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
%relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 16.7K bytes
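Extending the RELU case sketched earlier, the decomposition maps each activation attribute exercised in this test to a quantized clamp range, with RELU6 and RELU_N1_TO_1 quantizing their real bounds by the same formula. An illustrative dispatch (the exact emitted constants live in decompose_patterns.td):

    def quant_act_range(act, scale, zp):
      q = lambda r: int(round(r / scale)) + zp
      if act == "RELU":
        return q(0.0), 127
      if act == "RELU6":
        return q(0.0), q(6.0)
      if act == "RELU_N1_TO_1":
        return q(-1.0), q(1.0)
      return -128, 127  # "NONE": the full i8 range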