Results 1 - 10 of 19 for elu (0.04 sec)
tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir
// CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Elu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 13.2K bytes
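For intuition, here is a minimal Python sketch of the unfused pattern this test starts from: Conv2D followed by BiasAdd and Elu, using the tensor shapes from the CHECK line above. Whether the fusion into tf._FusedConv2D actually fires depends on the grappler/MLIR pipeline in use, so treat this as illustrative only.

import tensorflow as tf

@tf.function
def conv_bias_elu(x, filters, bias):
  # Unfused chain: Conv2D -> BiasAdd -> Elu; the fused kernel matcher
  # rewrites this into a single tf._FusedConv2D with fused_ops
  # ["BiasAdd", "Elu"].
  y = tf.nn.conv2d(x, filters, strides=[1, 1, 1, 1], padding="SAME")
  y = tf.nn.bias_add(y, bias)
  return tf.nn.elu(y)

x = tf.random.normal([8, 32, 32, 3])       # NHWC input, as in the CHECK line
filters = tf.random.normal([1, 1, 3, 128])
bias = tf.random.normal([128])
out = conv_bias_elu(x, filters, bias)      # shape [8, 32, 32, 128]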
tensorflow/compiler/mlir/tfr/integration/graph_decompose_test.py
def testWithKnownKernel(self):
  @def_function.function
  def biasd_dense_elu(x, y, z):
    dot = gen_composite_ops.my_biased_dense(x, y, z)
    return nn_ops.elu(dot)  # with known kernel, should not expand.

  t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
  t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
  t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 3.2K bytes
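The composite op in this test is not part of the public TF API. As a rough plain-TF reference, assuming my_biased_dense(x, y, z) computes x @ y + z, the test function above is equivalent to the following sketch:

import tensorflow as tf

def biased_dense_elu_reference(x, y, z):
  # Assumed semantics of the composite: matmul plus bias, then ELU.
  dot = tf.matmul(x, y) + z
  return tf.nn.elu(dot)

t1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = tf.constant([[-10.0, -10.0], [-10.0, -10.0]])
print(biased_dense_elu_reference(t1, t2, t3))  # negative pre-activations are squashed toward -1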
tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])

def testWithKnownKernel(self):
  def biasd_dense_elu(x, y, z):
    dot = gen_composite_ops.my_biased_dense(x, y, z)
    return nn_ops.elu(dot)  # with known kernel, should not expand.

  t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
  t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
  t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 3.9K bytes
tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir
}

tfr.func @tf__my_biased_dense_(!tfr.tensor<T>, !tfr.tensor<T>, !tfr.tensor<T>,
                               !tfr.attr{tfr.name="act", tfr.default=""}) -> !tfr.tensor attributes {T}

// This is a wrong decomposition and used to verify that tf.Elu isn't decomposed
// since its kernel has been registered.
tfr.func @tf__elu_(%input: !tfr.tensor) -> !tfr.tensor {
  tfr.return %input : !tfr.tensor
}

// Translated from:
//
// REGISTER_OP("Add")
Last Modified: Wed Oct 13 16:33:28 UTC 2021 - 4.2K bytes
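The identity body above is "wrong" on purpose: real ELU only passes positive inputs through unchanged. A quick Python check of the difference (standard ELU with alpha = 1):

import tensorflow as tf

x = tf.constant([-10.0, -1.0, 0.0, 2.0])
print(tf.nn.elu(x))  # [exp(-10)-1, exp(-1)-1, 0, 2]: negatives are remapped
print(x)             # the identity "decomposition" would return this instead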
tensorflow/cc/gradients/nn_grad_test.cc
namespace tensorflow { namespace { using ops::AvgPool; using ops::AvgPool3D; using ops::BiasAdd; using ops::Conv2D; using ops::Conv2DBackpropInput; using ops::DepthwiseConv2dNative; using ops::Elu; using ops::FractionalAvgPool; using ops::FractionalMaxPool; using ops::FusedBatchNormV3; using ops::L2Loss; using ops::LogSoftmax; using ops::LRN; using ops::MaxPool; using ops::MaxPool3D; using ops::MaxPoolV2;
Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes
tensorflow/compiler/mlir/tfr/tests/decompose.mlir
%scale = arith.constant 0.1 : f32
%zp = arith.constant 42 : i64
%elu_attr = tfr.constant "ELU" -> !tfr.attr
%min, %max = "tfr.quant_act_range"(%elu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
func.return %min, %max : !tfr.tensor, !tfr.tensor

// CHECK: %[[elu_attr:.*]] = tfr.constant "ELU" -> !tfr.attr
// CHECK: %[[min:.*]], %[[max:.*]] = tfr.quant_act_range(%[[elu_attr]]
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 16.7K bytes
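For context, here is a sketch of the affine quantization arithmetic behind an activation range like this, under the usual q = round(x / scale) + zero_point convention with the scale 0.1 and zero point 42 used above. The exact bounds tfr.quant_act_range returns for "ELU" are defined by the TFR library, so this only illustrates the mapping, not that op's output.

def quantize(x, scale=0.1, zero_point=42):
  # Standard affine quantization: real value -> integer grid.
  return int(round(x / scale)) + zero_point

# ELU (alpha = 1) is bounded below by -1 and unbounded above, so a real
# activation range of roughly [-1, +inf) maps to quantized values like:
print(quantize(-1.0))   # 32
print(quantize(0.0))    # 42 (the zero point itself)
print(quantize(10.0))   # 142, before clamping to the integer type's range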
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
def : Pat<(TF_OnesLikeOp AnyTensor:$arg), (MHLO_ConstantLike<"1"> $arg)>;

//===----------------------------------------------------------------------===//
// Elu op patterns.
//===----------------------------------------------------------------------===//

def : Pat<(TF_EluOp AnyTensor:$features),
          (MHLO_SelectOp
            (MHLO_CompareOp $features,
Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes
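The pattern lowers tf.Elu to a compare/select over expm1, which matches the textbook definition ELU(x) = x for x > 0 and exp(x) - 1 otherwise (alpha = 1). A small Python sketch with the same structure:

import tensorflow as tf

def elu_via_select(features):
  # Same shape as the MHLO pattern: select(x > 0, x, expm1(x)).
  return tf.where(features > 0.0, features, tf.math.expm1(features))

x = tf.constant([-2.0, -0.5, 0.0, 3.0])
print(elu_via_select(x))
print(tf.nn.elu(x))  # should agree elementwise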
tensorflow/cc/gradients/nn_grad.cc
                  std::vector<Output>* grad_outputs) {
  auto dx = internal::EluGrad(scope, grad_inputs[0], op.output(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Elu", EluGradHelper);

Status SeluGradHelper(const Scope& scope, const Operation& op,
                      const std::vector<Output>& grad_inputs,
                      std::vector<Output>* grad_outputs) {
Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes
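EluGrad is fed the forward output rather than the input, which works because for x > 0 the derivative is 1, and for x <= 0 the output is exp(x) - 1, so the derivative exp(x) equals output + 1. A rough Python equivalent (alpha = 1), for intuition only rather than the registered kernel:

import tensorflow as tf

def elu_grad_reference(grad, elu_output):
  # d/dx elu(x) = 1 where the output is positive, else output + 1 (= exp(x)).
  return grad * tf.where(elu_output > 0.0,
                         tf.ones_like(elu_output),
                         elu_output + 1.0)

x = tf.constant([-2.0, -0.5, 0.0, 3.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.nn.elu(x)
print(tape.gradient(y, x))
print(elu_grad_reference(tf.ones_like(x), y))  # should match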
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs
MIRROR_PAD = 100, ABS = 101, SPLIT_V = 102, UNIQUE = 103, CEIL = 104, REVERSE_V2 = 105, ADD_N = 106, GATHER_ND = 107, COS = 108, WHERE = 109, RANK = 110, ELU = 111, REVERSE_SEQUENCE = 112, MATRIX_DIAG = 113, QUANTIZE = 114, MATRIX_SET_DIAG = 115, ROUND = 116, HARD_SWISH = 117, IF = 118, WHILE = 119, NON_MAX_SUPPRESSION_V4 = 120,
Last Modified: Mon Apr 19 19:46:06 UTC 2021 - 26.1K bytes
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
MIRROR_PAD = 100, ABS = 101, SPLIT_V = 102, UNIQUE = 103, CEIL = 104, REVERSE_V2 = 105, ADD_N = 106, GATHER_ND = 107, COS = 108, WHERE = 109, RANK = 110, ELU = 111, REVERSE_SEQUENCE = 112, MATRIX_DIAG = 113, QUANTIZE = 114, MATRIX_SET_DIAG = 115, ROUND = 116, HARD_SWISH = 117, IF = 118, WHILE = 119, NON_MAX_SUPPRESSION_V4 = 120,
Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes
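ELU = 111 is the builtin operator code a converted model refers to in these schemas. A minimal, hedged example of producing a TFLite model that should use it, via the standard converter API (the layer shapes here are arbitrary):

import tensorflow as tf

# Tiny Keras model whose activation should map to the ELU builtin (code 111)
# in the generated flatbuffer.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(4,)),
    tf.keras.layers.Dense(8),
    tf.keras.layers.ELU(),
])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open("elu_example.tflite", "wb") as f:
  f.write(tflite_model)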