Results 1 - 10 of 82 for conv2 (0.04 sec)

  1. test/inline.go

    }
    
    // Ensure OCONVNOP is zero cost.
    func Conv(v uint64) uint64 { // ERROR "can inline Conv"
    	return conv2(conv2(conv2(v))) // ERROR "inlining call to (conv1|conv2)"
    }
    func conv2(v uint64) uint64 { // ERROR "can inline conv2"
    	return conv1(conv1(conv1(conv1(v)))) // ERROR "inlining call to conv1"
    }
    func conv1(v uint64) uint64 { // ERROR "can inline conv1"
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 23:33:25 UTC 2023
    - 11.7K bytes
    - Viewed (0)
  2. test/newinline.go

    }
    
    // Ensure OCONVNOP is zero cost.
    func Conv(v uint64) uint64 { // ERROR "can inline Conv"
    	return conv2(conv2(conv2(v))) // ERROR "inlining call to (conv1|conv2)"
    }
    func conv2(v uint64) uint64 { // ERROR "can inline conv2"
    	return conv1(conv1(conv1(conv1(v)))) // ERROR "inlining call to conv1"
    }
    func conv1(v uint64) uint64 { // ERROR "can inline conv1"
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 16 20:15:25 UTC 2023
    - 11.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %b2 = arith.constant dense<[1.0e-2, 2.1473647e1, -2.1473647e2]> : tensor<3xf32>
      %conv = "tfl.conv_2d"(%0, %w, %b) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      // CHECK-NEXT: [[CONV:%.*]] = "tf.Conv2D"([[INPUT]], [[FILTER]]) <{dilations = [1, 2, 2, 1], padding = "SAME", strides = [1, 1, 1, 1]}> : (tensor<1x128x128x3xf32>, tensor<5x5x3x8xf32>) -> tensor<1x128x128x8xf32>
      // CHECK-NEXT: [[RESULT:%.*]] = "tf.BiasAdd"([[CONV]], [[BIAS]]) : (tensor<1x128x128x8xf32>, tensor<8xf32>) -> tensor<1x128x128x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 44.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

    // CHECK: %[[CONV2D:.*]] = "tf.Conv2D"(%arg0, %[[CONST]]) <{data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true}> : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        %1 = "tf.PartitionedCall"(%arg0, %cst, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_with_bias_and_relu6_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x2x3x2xf32>, tensor<2xf32>) -> tensor<*xf32> loc(callsite("test@conv"("Conv2D_1") at "QuantizationUnit(\12\08Conv2D_1\1a\04conv)"))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td

       (IsInt8ElementType $filter),
       (IsConstTensor $filter),
       (IsInt32ElementType $conv),
       (HasStaticShapeConstraint $filter),
       (HasStaticShapeAtDimsConstraint<"3"> $input)],
      [], (addBenefit 10)>;
    
    // Convert Conv2D with hybrid inputs (f32 activation/int8 weight) to XlaConv
    def ConvertTFConv2DToXLAConvOpWeightOnly : Pat<
      (TF_Conv2DOp:$conv
        $input,
        (TF_MulOp (TF_CastOp (TF_IdentityOp $filter), $truncate1), $scale),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 21.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

        %conv = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 2, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        return %conv : tensor<*xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK-NOT: "tf._FusedConv2D"
      %0 = "tf.Conv2D"(%arg2, %arg1) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>) -> tensor<*xf32>
      // The result of the conv must be the first input to BiasAdd to be fusable.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt

        key: "narrow_range"
        value {
          b: true
        }
      }
      attr {
        key: "num_bits"
        value {
          i: 4
        }
      }
    }
    node {
      name: "BoxPredictor_4/ClassPredictor/Conv2D"
      op: "Conv2D"
      input: "input"
      input: "BoxPredictor_4/ClassPredictor/weights_quant/FakeQuantWithMinMaxVarsPerChannel"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)