Results 1 - 10 of 47 for RELU (0.11 sec)
- tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc
    func.func @main(%arg0: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
      %0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
      %1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
      %2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
      %3 = "tf.Relu"(%2) : (tensor<?xi32>) -> tensor<?xi32>
      %4 = "tf.Relu"(%1) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
- tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/basic_lstm.mlir
    // CHECK-LABEL: @main
- tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir
    } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
    %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
    %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    %6 = "tf.Conv2D"(%arg0, %arg1) { data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
- tensorflow/c/experimental/gradients/nn_grad_test.cc
    using tensorflow::TF_StatusPtr;
    Status ReluModel(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> inputs,
                     absl::Span<AbstractTensorHandle*> outputs) {
      return ops::Relu(ctx, inputs[0], &outputs[0], "Relu");
    }
    Status SparseSoftmaxCrossEntropyWithLogitsModel(
        AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
        absl::Span<AbstractTensorHandle*> outputs) {
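  The gradient these tests exercise follows the usual ReLU rule: the upstream gradient passes through where the input was positive and is zeroed elsewhere. A minimal NumPy sketch of that rule (illustrative names, not taken from the test file):

    import numpy as np

    def relu(x):
        # Forward pass: elementwise max(x, 0).
        return np.maximum(x, 0.0)

    def relu_grad(x, upstream):
        # Backward pass: pass the upstream gradient where x > 0, zero elsewhere.
        return upstream * (x > 0.0)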
- tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
    Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
    Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
    Node* d = ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D"));
    Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E"));
    ops::UnaryOp("Relu", e, builder.opts().WithName("F"));
- tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
    {"quantized_ops": ["${main_op}", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "i8"},
    {"quantized_ops": ["${main_op}", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "i8"},
    {"quantized_ops": ["${main_op}"], "act_func": "internal_dequantize_no_activation_fn", "output_type": "f32"},
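  Each entry pairs a fused sequence of quantized ops with the internal activation function that implements it. Read as plain data, the mapping is roughly the following (a hypothetical Python paraphrase, not the library's actual representation):

    # Hypothetical paraphrase of the table above: (ops...) -> (act_func, output_type).
    act_funcs = {
        ("${main_op}", "Relu"): ("internal_requantize_and_relu_fn", "i8"),
        ("${main_op}", "Relu6"): ("internal_requantize_and_relu6_fn", "i8"),
        ("${main_op}",): ("internal_dequantize_no_activation_fn", "f32"),
    }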
- tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
    # If present the last op before return should be stablehlo.clamp for relu6
    # and stablehlo.maximum for relu.
    if activation_fn is nn_ops.relu6:
      self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
    elif activation_fn is nn_ops.relu:
      self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
    else:
      # Check activation functions are implicit.
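  The comment's claim follows from the definitions of the two activations: relu(x) = max(x, 0) lowers naturally to stablehlo.maximum, while relu6(x) = min(max(x, 0), 6) lowers to a single stablehlo.clamp. A quick NumPy check of that equivalence (a sketch, independent of the test):

    import numpy as np

    x = np.array([-3.0, 0.5, 7.0])
    # relu(x) == maximum(x, 0); relu6(x) == clamp(x, 0, 6).
    assert np.allclose(np.maximum(x, 0.0), [0.0, 0.5, 7.0])
    assert np.allclose(np.clip(x, 0.0, 6.0), [0.0, 0.5, 6.0])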
- tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc
    %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
    return %0 : tensor<1x4x4x3xf32>
    }
- tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
      dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
    } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
    %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
      dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
- tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
    // Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
    if (IsGpuDevice(conv)) {
      auto activation = GetActivation(bias_add);
      if (!activation || activation->getName().stripDialect() != "Relu" ||
          !bias_add.getOutput().hasOneUse()) {
        (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
          diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
        });
        return false;
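  The pattern this matcher folds into one fused kernel corresponds to three separate eager ops; a hedged TensorFlow sketch with illustrative shapes (not code from the pass):

    import tensorflow as tf

    x = tf.random.normal([1, 5, 5, 2])   # NHWC input
    w = tf.random.normal([3, 3, 2, 4])   # HWIO filter
    b = tf.zeros([4])                    # per-channel bias

    # The Conv2D + BiasAdd + Relu chain the matcher looks for on GPU.
    y = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w, strides=1, padding="SAME"), b))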