Results 101 - 110 of 134 for conv_2d (0.17 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir

      %0 = "tf.Conv2D"(%output, %cst) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 2, 2, 1], use_cudnn_on_gpu = true}> {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", device...
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 6.3K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops_large_constants.mlir

        %3 = "tf.Cast"(%2) {Truncate = false} : (tensor<960x960x3x512xi8>) -> tensor<960x960x3x512xi32>
        %4 = "tf.Sub"(%3, %arg5) : (tensor<960x960x3x512xi32>, tensor<512xi32>) -> tensor<960x960x3x512xi32>
        %5 = "tf.Conv2D"(%1, %4) {dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2240x2240x3xi32>, tensor<960x960x3x512xi32>) -> tensor<1x2240x1120x512xi32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 5.9K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

    def MultiplyFakeQuantValue : NativeCodeCall<
      "MultiplyFakeQuantValue($_builder, $_loc, $0...)">;
    
    // Convert AddV2Op following an AffineOp to BiasAddOp.
    // For Conv3D, even though the Conv3D op has "NDHWC" data format, the BiasAdd
    // will still have the data format of "NHWC".
    def ConvertAddToBiasAdd : Pat<
      (TF_AddV2Op
        (SupportedAffineOpMatcher $conv_out, $input, $weight),
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
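    The ConvertAddToBiasAdd pattern quoted above rewrites an AddV2 that follows an affine op (such as Conv2D) into a BiasAdd. A minimal numeric sanity check of that equivalence, with made-up shapes that are not taken from prepare_lifting.td:

        import tensorflow as tf

        # Adding a per-channel bias after a convolution (AddV2 with broadcasting)
        # computes the same values as BiasAdd, which is what the rewrite relies on.
        x = tf.random.normal([1, 5, 5, 3])
        w = tf.random.normal([3, 3, 3, 8])
        bias = tf.random.normal([8])
        conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
        diff = tf.reduce_max(tf.abs((conv + bias) - tf.nn.bias_add(conv, bias)))
        print(diff.numpy())  # 0.0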
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir

      %dq_bias = "quantfork.dcast"(%q_bias) : (tensor<2x!quant.uniform<i32:f32, 0.044022349891595126>>) -> tensor<2xf32>
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 6.4K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

      %0 = "tf.Conv2D"(%arg0, %arg1) {padding = "SAME", strides = [1, 1]} : (tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>) -> tensor<256x30x30x16xf32>
      func.return %0 : tensor<256x30x30x16xf32>
    }
    
    // -----
    
    func.func @testConv2D(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<3x3x3x16xf32>) -> tensor<256x30x30x16xf32> {
      // expected-error @+1 {{requires positive strides}}
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
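    tf-ops.mlir is a verifier test, so some ops in it are intentionally malformed (the expected-error line checks the "requires positive strides" diagnostic). For orientation, a well-formed Conv2D that actually produces the 256x30x30x16 result type shown would use a 3x3 filter with unit strides and VALID padding; a quick shape check, not taken from the test file:

        import tensorflow as tf

        x = tf.zeros([256, 32, 32, 3])
        w = tf.zeros([3, 3, 3, 16])
        # 32 - 3 + 1 = 30 along each spatial dimension.
        y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="VALID")
        print(y.shape)  # (256, 30, 30, 16)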
  6. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

        derived_attrs=['T: {float, int8}'],
        outputs=['o: T'])
    def _composite_conv_add_relu(input_, filter_, bias, stride_w, stride_h,
                                 dilation_w, dilation_h, padding, act):
      res = tf.raw_ops.Conv2D(
          input=input_,
          filter=filter_,
          strides=[1, stride_w, stride_h, 1],
          dilations=[1, dilation_w, dilation_h, 1],
          padding=padding)
      res = tf.raw_ops.Add(x=res, y=bias)
      if act == 'RELU':
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
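    The ops_defs.py snippet above builds a composite op from raw Conv2D, Add, and an optional ReLU. A rough stand-alone sketch of the same composition using the high-level tf.nn API (the function name, shapes, and defaults below are illustrative, not from the file):

        import tensorflow as tf

        def conv_add_relu(input_, filter_, bias, stride_w=1, stride_h=1,
                          dilation_w=1, dilation_h=1, padding="SAME", act="RELU"):
          # Conv2D, then bias add, then optional ReLU, mirroring the composite above.
          res = tf.nn.conv2d(input_, filter_,
                             strides=[1, stride_w, stride_h, 1],
                             dilations=[1, dilation_w, dilation_h, 1],
                             padding=padding)
          res = res + bias
          return tf.nn.relu(res) if act == "RELU" else res

        x = tf.random.normal([1, 8, 8, 3])
        w = tf.random.normal([3, 3, 3, 4])
        b = tf.zeros([4])
        print(conv_add_relu(x, w, b).shape)  # (1, 8, 8, 4)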
  7. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir

      %input_scale = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
      %side_input_scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
      %conv2d = "tf._FusedConv2D"(%input, %filter, %bias, %act, %input_scale, %side_input_scale) {
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 15.8K bytes
  8. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir

          %outputs_6, %control_7 = tf_executor.island wraps "tf.Const"() {device = "", value = dense<[-1, 16384]> : tensor<2xi32>} : () -> tensor<2xi32>
          %outputs_8, %control_9 = tf_executor.island wraps "tf.Conv2D"(%arg0, %outputs_0) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 2, 2, 1], use_cudnn_on_gpu = true} : (tensor<16x224x224x3xf32>, tensor<*xf32>) -> tensor<16x112x112x?xf32>
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 7.7K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

    // func.func func_name_${key1}_fn (...) {
    //   ...${key2}...
    // }
    // ```
    // The above template will generate two functions by substituting `key1` and
    // `key2` with given values.
    
    module {
    
      for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul"] {
        parameters[
          {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "!tf_type.qint8"},
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
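    The comment block above describes how the quantized function library is written as a parameterized template: the `for main_op in [...]` loop plus the `parameters[...]` list expand one body into a concrete function per op and per parameter set. As a loose illustration of that substitution idea only (the pass has its own expansion logic; this Python helper is hypothetical):

        from string import Template

        # One parameterized body expands into one function per main_op value.
        body = Template("func.func @quantized_${main_op}_fn(...) { ... }")
        for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul"]:
          print(body.substitute(main_op=main_op))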
  10. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

      // Canonicalization includes const folding, which is utilized here to optimize
      // away ops that can't get constant folded after the PrepareTF pass. For example,
      // tf.Conv2D is split into tf.Transpose and tfl.Conv2D.
      pass_manager->addNestedPass<mlir::func::FuncOp>(
          mlir::createCanonicalizerPass());
      pass_manager->addNestedPass<mlir::func::FuncOp>(mlir::createCSEPass());
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
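    The pass ordering above (canonicalization with const folding, then CSE) is part of the TF-to-TFLite conversion pipeline that tf_tfl_passes.cc assembles. A minimal way to exercise that pipeline from Python, assuming the default MLIR-based converter and any small Keras model (the model below is illustrative):

        import tensorflow as tf

        model = tf.keras.Sequential(
            [tf.keras.layers.Conv2D(4, 3, input_shape=(8, 8, 3))])
        # convert() drives the MLIR conversion pipeline, which includes the
        # canonicalizer and CSE passes added in tf_tfl_passes.cc.
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        print(len(tflite_model), "bytes of TFLite flatbuffer")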