Results 21 - 30 of 144 for relu (0.05 sec)
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir
// CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
// CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x8x4x4xf32>
// CHECK: return %[[RELU]]
%0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
%1 = "tf.Relu"(%0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
%2 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.3K bytes
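A minimal sketch (not from the test; numpy stands in for the TF ops) of the identity this layout-optimization pass depends on: elementwise ops such as tf.Tanh and tf.Relu commute with tf.Transpose, so a transpose can be hoisted to the beginning of the chain here, or sunk to the end in the companion move_transposes_end test further down.

import numpy as np

x = np.random.randn(1, 4, 4, 8).astype(np.float32)  # NHWC input
perm = (0, 3, 1, 2)                                 # NHWC -> NCHW permutation

relu = lambda t: np.maximum(t, 0)

# transpose(relu(tanh(x))) == relu(tanh(transpose(x)))
a = np.transpose(relu(np.tanh(x)), perm)
b = relu(np.tanh(np.transpose(x, perm)))
assert np.allclose(a, b)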
tensorflow/c/experimental/ops/nn_ops.h
// Computes rectified linear gradients for a Relu operation.
Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                AbstractTensorHandle* const features,
                AbstractTensorHandle** backprops, const char* name = nullptr,
                const char* raw_device_name = nullptr);

// Computes rectified linear: `max(features, 0)`.
Status Relu(AbstractContext* ctx, AbstractTensorHandle* const features,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 2.6K bytes
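The header comments give the exact semantics: Relu computes max(features, 0), and ReluGrad passes upstream gradients through only where the input was positive. A reference sketch in numpy (an illustration, not the C API):

import numpy as np

def relu(features):
    # Computes rectified linear: max(features, 0).
    return np.maximum(features, 0)

def relu_grad(gradients, features):
    # Backprop flows only where the forward input was positive.
    return gradients * (features > 0)

f = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(relu(f))                        # [0.  0.  0.  0.5 2. ]
print(relu_grad(np.ones_like(f), f))  # [0. 0. 0. 1. 1.]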
tensorflow/c/experimental/gradients/nn_grad_test.cc
using tensorflow::TF_StatusPtr;

Status ReluModel(AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle* const> inputs,
                 absl::Span<AbstractTensorHandle*> outputs) {
  return ops::Relu(ctx, inputs[0], &outputs[0], "Relu");
}

Status SparseSoftmaxCrossEntropyWithLogitsModel(
    AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
    absl::Span<AbstractTensorHandle*> outputs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 8.3K bytes
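This test wires ReluModel into a gradient checker. A hypothetical numpy sketch of the underlying idea, not the test's actual machinery: compare the analytic Relu gradient against a central finite difference, sampled away from the kink at 0.

import numpy as np

def numerical_grad(f, x, eps=1e-4):
    # Central difference of sum(f(x)) with respect to each element of x.
    g = np.zeros_like(x)
    for i in range(x.size):
        d = np.zeros_like(x)
        d.flat[i] = eps
        g.flat[i] = (f(x + d).sum() - f(x - d).sum()) / (2 * eps)
    return g

x = np.array([-1.5, -0.3, 0.7, 2.0])
analytic = (x > 0).astype(x.dtype)                     # d/dx max(x, 0)
numeric = numerical_grad(lambda t: np.maximum(t, 0), x)
assert np.allclose(analytic, numeric, atol=1e-3)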
tensorflow/compiler/mlir/tfr/python/op_reg_gen_test.py
@composite.Composite(
    'TestNoOp', derived_attrs=['T: numbertype'], outputs=['o1: T'])
def _composite_no_op():
  pass


@Composite(
    'TestCompositeOp',
    inputs=['x: T', 'y: T'],
    attrs=['act: {"", "relu"}', 'trans: bool = true'],
    derived_attrs=['T: numbertype'],
    outputs=['o1: T', 'o2: T'])
def _composite_op(x, y, act, trans):
  return x + act, y + trans


class TFRGenTensorTest(test.TestCase):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 2.5K bytes
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir
// CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%arg0) {{.*}} tensor<1x4x4x8xf32>
// CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x4x4x8xf32>
// CHECK: %[[RES_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%[[RELU]], %[[RES_PERM]])
// CHECK: return %[[RES_TRANSPOSE]]
%0 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.5K bytes
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf_v2.mlir
// CHECK-NEXT: outputs: [ 5 ],
// CHECK-NEXT: builtin_options_type: SVDFOptions,
// CHECK-NEXT: builtin_options: {
// CHECK-NEXT:   rank: 2,
// CHECK-NEXT:   fused_activation_function: RELU
// CHECK-NEXT: }
// CHECK-NEXT: } ],
// CHECK-NEXT: name: "main"
// CHECK-NEXT: } ],
// CHECK-NEXT: description: "MLIR Converted.",
// CHECK-NEXT: buffers: [ {
// CHECK-EMPTY:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 3.7K bytes
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py
# output shape: [-1, 28, 28, 32]
conv1 = gen_mnist_ops.new_conv2d(x, self.weights['f1'], self.biases['b1'],
                                 1, 1, 1, 1, 'SAME', 'RELU')

# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
# output shape: [-1, 14, 14, 32]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 20 03:05:18 UTC 2021 - 6.5K bytes
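The comments carry the shape arithmetic: a 'SAME' convolution with stride 1 keeps the 28x28 spatial size, and a 2x2 pool with stride 2 halves it. A sketch checking that arithmetic, with standard tf.nn ops standing in for the generated gen_mnist_ops.new_conv2d (the 5x5 filter size is an assumption):

import tensorflow as tf

x = tf.random.normal([1, 28, 28, 1])   # batch of MNIST images, NHWC
f1 = tf.random.normal([5, 5, 1, 32])   # hypothetical 5x5 filter, 32 channels

conv1 = tf.nn.relu(tf.nn.conv2d(x, f1, strides=1, padding='SAME'))
print(conv1.shape)   # (1, 28, 28, 32)

pool1 = tf.nn.max_pool2d(conv1, ksize=2, strides=2, padding='SAME')
print(pool1.shape)   # (1, 14, 14, 32)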
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir
// CHECK-NEXT: outputs: [ 5 ],
// CHECK-NEXT: builtin_options_type: SVDFOptions,
// CHECK-NEXT: builtin_options: {
// CHECK-NEXT:   rank: 2,
// CHECK-NEXT:   fused_activation_function: RELU
// CHECK-NEXT: }
// CHECK-NEXT: } ],
// CHECK-NEXT: name: "main"
// CHECK-NEXT: } ],
// CHECK-NEXT: description: "MLIR Converted.",
// CHECK-NEXT: buffers: [ {
// CHECK-EMPTY:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 3.6K bytes
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_ops_test.py
    'input_': input_,
    'filter_': filter_,
    'bias': bias,
    'stride_w': 2,
    'stride_h': 2,
    'dilation_w': 1,
    'dilation_h': 1,
    'padding': 'SAME',
    'act': 'RELU'
}

self._assertOpAndComposite([input_, filter_, bias],
                           tf.function(gen_mnist_ops.new_conv2d),
                           ops_defs._composite_conv_add_relu, kwargs)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 4K bytes
tensorflow/compiler/mlir/tfr/README.md
    attrs=['act: {"", "RELU", "RELU6", "TANH"} = ""'],
    derived_attrs=['T: {float, int8}'],
    outputs=['o: T'])
def _composite_fully_connected(input_, filter_, bias, act):
  res = tf.raw_ops.MatMul(
      a=input_, b=filter_, transpose_a=False, transpose_b=True)
  res = tf.raw_ops.Add(x=res, y=bias)
  if act == 'RELU':
    return tf.raw_ops.Relu(features=res)
  elif act == 'RELU6':
    return tf.raw_ops.Relu6(features=res)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes
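The README's composite expands to a raw MatMul + Add with an optional fused activation. A sketch of the equivalent eager computation using public tf.* ops instead of tf.raw_ops (the TANH branch follows from the attrs list above, which the snippet truncates before reaching):

import tensorflow as tf

def fully_connected(input_, filter_, bias, act=''):
    # filter_ uses [out, in] layout, hence transpose_b=True.
    res = tf.matmul(input_, filter_, transpose_b=True)
    res = res + bias
    if act == 'RELU':
        return tf.nn.relu(res)
    elif act == 'RELU6':
        return tf.nn.relu6(res)
    elif act == 'TANH':
        return tf.tanh(res)
    return res

x = tf.random.normal([2, 4])
w = tf.random.normal([3, 4])
b = tf.zeros([3])
print(fully_connected(x, w, b, act='RELU').shape)  # (2, 3)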