Results 21 - 30 of 79 for RELU (0.39 sec)

  1. tensorflow/compiler/jit/tests/keras_imagenet_main.golden_summary

     Conv2D 53
     Conv2DBackpropFilter 53
     Conv2DBackpropInput 52
     Equal 1
     FusedBatchNormGradV2 53
     FusedBatchNormV2 53
     MatMul 3
     MaxPool 1
     MaxPoolGrad 1
     Mean 1
     Mul 218
     Pad 2
     ReadVariableOp 538
     Relu 49
     ReluGrad 49
     Reshape 2
     ResourceApplyKerasMomentum 161
     Slice 1
     Softmax 1
     SparseSoftmaxCrossEntropyWithLogits 1
     Squeeze 1
     Sum 1
     Tile 1
     Transpose 1
    cluster 1 size 815
     AddN 1
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 874 bytes
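
    For context: clusters like this one are produced when TensorFlow's XLA JIT
    groups ops for compilation. A minimal sketch using TF2's public
    jit_compile flag (the function below is illustrative, not part of the test):

        import tensorflow as tf

        # jit_compile=True asks TensorFlow to cluster the ops in this function
        # (Conv2D, Relu, ...) for XLA; the golden summary above records the op
        # counts of this kind of cluster for the Keras ImageNet model.
        @tf.function(jit_compile=True)
        def conv_relu(x, w):
            y = tf.nn.conv2d(x, w, strides=1, padding="SAME")
            return tf.nn.relu(y)

        x = tf.random.normal([1, 8, 8, 3])
        w = tf.random.normal([3, 3, 3, 16])
        print(conv_relu(x, w).shape)  # (1, 8, 8, 16)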
  2. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir

    // CHECK-NEXT:      outputs: [ 5, 6, 7, 8 ],
    // CHECK-NEXT:      builtin_options_type: LSTMOptions,
    // CHECK-NEXT:      builtin_options: {
    // CHECK-NEXT:        fused_activation_function: RELU,
    // CHECK-NEXT:        cell_clip: 1.0,
    // CHECK-NEXT:        proj_clip: 2.0,
    // CHECK-NEXT:        kernel_type: BASIC
    // CHECK-NEXT:      },
    // CHECK-NEXT:      intermediates: [ ]
    // CHECK-NEXT:    } ],
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 4.4K bytes
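
    For reference: a rough sketch of what the three checked options mean,
    assuming the usual TFLite LSTM semantics (cell_clip bounds the cell state,
    proj_clip bounds the projected output, and the fused activation applies to
    the output); apply_lstm_options is a hypothetical helper, not a TFLite API:

        import numpy as np

        def apply_lstm_options(cell_state, output, cell_clip=1.0, proj_clip=2.0):
            # cell_clip: 1.0 bounds the cell state to [-1, 1].
            cell_state = np.clip(cell_state, -cell_clip, cell_clip)
            # proj_clip: 2.0 bounds the projected output to [-2, 2].
            output = np.clip(output, -proj_clip, proj_clip)
            # fused_activation_function: RELU is applied to the output.
            return cell_state, np.maximum(output, 0.0)

        c, o = apply_lstm_options(np.array([1.7, -0.3]), np.array([2.5, -1.0]))
        print(c, o)  # [ 1.  -0.3] [2. 0.]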
  3. tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td

    // Canonicalize tf.Maximum of zero to tf.Relu
    //===----------------------------------------------------------------------===//
    
    def IsInteger32Pred: CPred<
      "getElementTypeOrSelf($0.getType()).isInteger(32)">;
    
    // Whether the transformation is compatible with the device if given.
    // Currently, Relu with int32 is not supported on GPU.
    def IsDeviceCompatible: Constraint<
    - Last Modified: Wed Dec 06 18:42:28 UTC 2023
    - 17K bytes
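
    Numerically, the identity behind this rewrite is easy to check; a quick
    sketch of Maximum(x, 0) == Relu(x):

        import tensorflow as tf

        # Maximum(x, 0) and Relu(x) agree elementwise, which is what lets the
        # canonicalizer replace one with the other. The IsDeviceCompatible
        # constraint skips the rewrite for int32 on GPU, where Relu is
        # unsupported.
        x = tf.constant([-2.0, -0.5, 0.0, 1.5])
        assert bool(tf.reduce_all(tf.equal(tf.maximum(x, 0.0), tf.nn.relu(x))))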
  4. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
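
    Written out in Python with the shapes from the CHECK line above, the
    unfused pattern the matcher looks for is the chain below; the pass
    rewrites the three ops into the single _FusedConv2D with
    fused_ops = ["BiasAdd", "Relu"]:

        import tensorflow as tf

        x = tf.random.normal([8, 32, 32, 3])   # input, NHWC
        w = tf.random.normal([1, 1, 3, 128])   # 1x1 filter
        b = tf.random.normal([128])            # bias

        # Conv2D -> BiasAdd -> Relu: the chain that gets fused.
        y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
        y = tf.nn.bias_add(y, b)
        y = tf.nn.relu(y)
        print(y.shape)  # (8, 32, 32, 128)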
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
  6. tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td

         (TFR_ConstantTensorOp (Arith_ConstantOp ConstantAttr<I32Attr, "127">))]>;
    
    def QuantActRangeReluPattern :
      Pattern<
        (TFR_TFRQuantActRangeOp
         (TFR_ConstOp HasStringAttr<"RELU">:$act),
         (ConstantLikeMatcher F32Attr:$scale),
         (ConstantLikeMatcher I64Attr:$zp)),
        [(TFR_ConstantTensorOp (Arith_ConstantOp (Quantize<"0.0f"> $scale, $zp))),
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 2.4K bytes
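
    As a sketch of the range this pattern computes, assuming standard affine
    quantization (q = round(r / scale) + zero_point): for RELU the lower bound
    is the quantized value of 0.0 and the upper bound is the int8 max 127 from
    the pattern; the scale and zero_point values below are made up:

        # Standard affine quantization: q = round(r / scale) + zero_point.
        def quantize(r, scale, zero_point):
            return int(round(r / scale)) + zero_point

        scale, zero_point = 0.5, -10             # example values only
        qmin = quantize(0.0, scale, zero_point)  # quantized 0.0 -> -10
        qmax = 127                               # int8 max, per the pattern
        print(qmin, qmax)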
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      // CHECK: tac.device = "CPU", tac.inference_type = "FLOAT"
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          # If present the last op before return should be stablehlo.clamp for relu6
          # and stablehlo.maximum for relu.
          if activation_fn is nn_ops.relu6:
            self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
          elif activation_fn is nn_ops.relu:
            self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
        else:
          # Check activation functions are implicit.
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
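
    The checked lowering mirrors the definitions of the two activations: relu
    needs only a lower bound, hence stablehlo.maximum, while relu6 needs both
    bounds, hence stablehlo.clamp. A numpy sketch:

        import numpy as np

        x = np.array([-3.0, 2.0, 9.0])
        relu  = np.maximum(x, 0.0)    # lowers to stablehlo.maximum against 0
        relu6 = np.clip(x, 0.0, 6.0)  # lowers to stablehlo.clamp(0, x, 6)
        print(relu, relu6)            # [0. 2. 9.] [0. 2. 6.]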
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

          %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
          return %0 : tensor<1x4x4x3xf32>
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
    - Viewed (0)
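
    Schematically: in TFLite ops like the tfl.conv_2d above,
    fused_activation_function = "RELU" means the activation is folded into the
    op rather than emitted as a separate op:

        import numpy as np

        # Applying the fused RELU to the convolution result is equivalent to
        # running a separate relu op after an unfused conv_2d.
        conv_result = np.array([[-1.2, 0.7], [3.1, -0.4]])
        fused = np.maximum(conv_result, 0.0)
        print(fused)  # [[0.  0.7] [3.1 0. ]]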
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir

    // CHECK: func private @quantized_conv2d_with_relu6_fn
    // CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
    // CHECK: func private @quantized_matmul_with_bias_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
    // CHECK: func private @quantized_matmul_fn
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 3.3K bytes