Results 51 - 60 of 108 for Selu (0.07 sec)
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir
```mlir
// CHECK-NEXT: outputs: [ 5, 6, 7, 8 ],
// CHECK-NEXT: builtin_options_type: LSTMOptions,
// CHECK-NEXT: builtin_options: {
// CHECK-NEXT:   fused_activation_function: RELU,
// CHECK-NEXT:   cell_clip: 1.0,
// CHECK-NEXT:   proj_clip: 2.0,
// CHECK-NEXT:   kernel_type: BASIC
// CHECK-NEXT: },
// CHECK-NEXT: intermediates: [ ]
// CHECK-NEXT: } ],
```
Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 4.4K bytes
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
```tablegen
// Canonicalize tf.Maximum of zero to tf.Relu
//===----------------------------------------------------------------------===//
def IsInteger32Pred: CPred<
  "getElementTypeOrSelf($0.getType()).isInteger(32)">;

// Whether the transformation is compatible with the device if given.
// Currently, Relu with int32 is not supported on GPU.
def IsDeviceCompatible: Constraint<
```
Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 17K bytes
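The pattern above rewrites `tf.Maximum(x, 0)` to `tf.Relu(x)`, with a device guard because int32 Relu is not supported on GPU. A minimal NumPy sketch of the identity the rewrite relies on (illustrative code, not from the file):

```python
import numpy as np

def relu(x):
  # Element-wise rectifier: negative entries become zero.
  return np.maximum(x, 0)

x = np.array([-2.0, -0.5, 0.0, 3.0], dtype=np.float32)
# maximum(x, 0) and relu(x) agree everywhere, which is what makes
# the tf.Maximum -> tf.Relu canonicalization safe.
assert np.array_equal(np.maximum(x, 0.0), relu(x))
```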
tensorflow/compiler/mlir/lite/tests/optimize.mlir
```mlir
// Fusing: %[[add1:[0-9].*]] = tfl.add %arg0, %[[add]] {fused_activation_function = "RELU"} : tensor<1xf32>
// Fusing: %[[relu:[0-9].*]] = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
// Fusing: %[[add2:[0-9].*]] = tfl.add %[[relu]], %[[add1]] {fused_activation_function = "RELU6"} : tensor<1xf32>
// Fusing: %[[add3:[0-9].*]] = tfl.add %[[add2]], %[[relu]] {fused_activation_function = "RELU6"} : tensor<1xf32>
// Fusing: return
```
Last Modified: Thu May 16 20:31:41 UTC 2024 - 284.1K bytes
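These FileCheck expectations assert that a trailing activation is folded into the producing op's `fused_activation_function` attribute. As a rough sketch of the semantics, an add with fused `RELU6` computes a clamped sum in a single op (the NumPy helper below is illustrative):

```python
import numpy as np

def add_fused_relu6(a, b):
  # tfl.add with fused_activation_function = "RELU6": one op that
  # adds and then clamps the result to [0, 6].
  return np.clip(a + b, 0.0, 6.0)

a = np.array([-1.0, 2.0, 7.0], dtype=np.float32)
b = np.array([0.5, 1.0, 1.0], dtype=np.float32)
print(add_fused_relu6(a, b))  # [0. 3. 6.]
```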
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
{"quantized_ops": ["${main_op}", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}"], "act_func": "internal_dequantize_no_activation_fn", "output_type": "f32"},
Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes
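The table parametrizes quantized kernels by the activation that follows the main op; when that activation is `Relu`, requantization and the ReLU clamp fuse into one function. A hedged sketch of why that fusion works (the helper below is illustrative, not the library's actual `internal_requantize_and_relu_fn`):

```python
import numpy as np

def requantize_and_relu(acc_i32, rescale, zero_point):
  # Rescale the int32 accumulator into the i8 output domain...
  q = np.round(acc_i32 * rescale) + zero_point
  # ...then clamp to [zero_point, 127]. Values below the quantized zero
  # are negative in real terms, so this clamp is exactly the ReLU.
  return np.clip(q, zero_point, 127).astype(np.int8)
```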
tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td
```tablegen
  (TFR_ConstantTensorOp (Arith_ConstantOp ConstantAttr<I32Attr, "127">))]>;

def QuantActRangeReluPattern : Pattern<
  (TFR_TFRQuantActRangeOp
    (TFR_ConstOp HasStringAttr<"RELU">:$act),
    (ConstantLikeMatcher F32Attr:$scale),
    (ConstantLikeMatcher I64Attr:$zp)),
  [(TFR_ConstantTensorOp (Arith_ConstantOp (Quantize<"0.0f"> $scale, $zp))),
```
Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 2.4K bytes
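For a `"RELU"` activation, the pattern materializes the quantized clamp range `[quantize(0.0), 127]`. A small sketch of that computation, assuming a standard affine quantizer (the `quantize` helper here is hypothetical):

```python
def quantize(x, scale, zero_point):
  # Affine quantization: round(x / scale) + zero_point.
  return round(x / scale) + zero_point

scale, zp = 0.05, -10
lo = quantize(0.0, scale, zp)  # == zp: the quantized representation of 0.0
hi = 127                       # i8 upper bound, the constant in the pattern
print(lo, hi)                  # -10 127
```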
tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir
```mlir
// CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
%1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
%2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: tac.device = "CPU", tac.inference_type = "FLOAT"
```
Last Modified: Fri May 19 19:32:06 UTC 2023 - 6.2K bytes
tensorflow/c/experimental/ops/update_cpp_ops.sh
```sh
  MatMul \
  Neg \
  Sum \
  Sub \
  Div \
  DivNoNan \
  Exp \
  Sqrt \
  SqrtGrad \
  Log1p

${generate} \
  --category=nn \
  SparseSoftmaxCrossEntropyWithLogits \
  ReluGrad \
  Relu \
  BiasAdd \
  BiasAddGrad

${generate} \
  --category=resource_variable \
  VarHandleOp \
  ReadVariableOp \
  AssignVariableOp \
  DestroyResourceOp

${generate} \
  --category=io \
```
Last Modified: Tue May 17 17:54:34 UTC 2022 - 1.6K bytes
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py
```python
def mnist_model():
  """Creates a MNIST model."""
  model = tf.keras.models.Sequential()
  model.add(tf.keras.layers.Flatten())
  model.add(tf.keras.layers.Dense(128, activation='relu'))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))
  return model


class TestModule(tf.Module):

  def __init__(self):
    super(TestModule, self).__init__()
    self.model = mnist_model()
```
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.7K bytes
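A sketch of how this model might be exercised; the random data, optimizer, and loss below are placeholders, not part of keras.py:

```python
import numpy as np
import tensorflow as tf

def mnist_model():
  """The Sequential model from the snippet above."""
  model = tf.keras.models.Sequential()
  model.add(tf.keras.layers.Flatten())
  model.add(tf.keras.layers.Dense(128, activation='relu'))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))
  return model

model = mnist_model()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# A dummy batch shaped like MNIST: 28x28 grayscale images, labels 0-9.
images = np.random.rand(32, 28, 28).astype(np.float32)
labels = np.random.randint(0, 10, size=(32,))
model.fit(images, labels, epochs=1, verbose=0)
```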
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/insert_fallback_tensor_copy.mlir
```mlir
// CHECK-NOT: tfrt_fallback_async.copy_if_small
%0 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
%1 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
tfrt.return %0, %1 : !tfrt_fallback.tf_tensor, !tfrt_fallback.tf_tensor
```
Last Modified: Fri Mar 25 10:51:48 UTC 2022 - 5.5K bytes
tensorflow/compiler/mlir/lite/schema/schema.fbs
```
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  L2_POOL_2D = 12,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  LOGISTIC = 14,
  LSH_PROJECTION = 15,
  LSTM = 16,
  MAX_POOL_2D = 17,
  MUL = 18,
  RELU = 19,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
  RELU_N1_TO_1 = 20,
```
Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes