Results 31 - 40 of 159 for _conv (0.04 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/nchw_convolution_to_nhwc.mlir

    // CHECK: %[[CONV:.+]] = stablehlo.convolution(%[[TRANSPOSE_0]], %[[TRANSPOSE_1]]) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = {{\[\[}}1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
    // CHECK: %[[TRANSPOSE_2:.+]] = stablehlo.transpose %[[CONV]], dims = [0, 3, 1, 2] : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 25 23:00:47 UTC 2024
    - 5.5K bytes
    - Viewed (0)
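The CHECK lines above verify that the NCHW-to-NHWC pass rewrites the convolution by transposing its operands into NHWC layout, running the convolution there, and transposing the result back with dims = [0, 3, 1, 2]. A minimal NumPy sketch of that layout round-trip (illustrative only, not the MLIR pass itself):

    import numpy as np

    # Stand-in for the NCHW activation, matching tensor<1x8x4x4xf32> ([b, f, 0, 1]).
    x_nchw = np.random.rand(1, 8, 4, 4)

    # Transpose into NHWC ([b, 0, 1, f]) -> shape (1, 4, 4, 8), like %[[TRANSPOSE_0]].
    x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))

    # The NHWC convolution would run here; use the input as a stand-in for its output.
    y_nhwc = x_nhwc

    # Transpose back to NCHW with dims = [0, 3, 1, 2], like %[[TRANSPOSE_2]].
    y_nchw = np.transpose(y_nhwc, (0, 3, 1, 2))
    assert y_nchw.shape == (1, 8, 4, 4)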
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir

      %conv = "tf.Conv2D"(%dq_input, %dq_weight) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

        %conv = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 2, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        return %conv : tensor<*xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
    - Viewed (0)
  4. src/cmd/internal/obj/x86/asm_test.go

    		var p obj.Prog
    		for _, test := range tests {
    			have := oclass(ctxt, &p, test.arg)
    			if have != test.want {
    				t.Errorf("oclass(%q):\nhave: %d\nwant: %d",
    					obj.Dconv(&p, test.arg), have, test.want)
    			}
    		}
    	}
    
    	// TODO(quasilyte): test edge cases for Hsolaris, etc?
    
    	t.Run("linux/AMD64", func(t *testing.T) {
    		ctxtAMD64 := obj.Linknew(&Linkamd64)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jul 28 19:39:51 UTC 2023
    - 9.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

                    "non-broadcastable operands";
          });
        }
        filter_value = filter.getValue();
        mul_value = multiplier.getValue();
        // In MHLO, Conv filter is in HWIO format, Depthwise conv filter is in HW1O
        // format and backprop input conv filter is in HWOI format.
        // Only fuses multiplier if all dimensions other than the out channel
        // dimension are equal to 1.
        if (!TFL::IsDimensionsDegenerateExceptLastOne(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
    - Viewed (0)
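The comment in this excerpt says the multiplier is fused into the convolution filter only when every dimension other than the output-channel (last) dimension equals 1, so it broadcasts cleanly over an HWIO-shaped filter. A minimal NumPy sketch of that shape condition (the helper name is hypothetical, not the TFL::IsDimensionsDegenerateExceptLastOne implementation):

    import numpy as np

    def is_degenerate_except_last(shape):
        # e.g. (1, 1, 1, 16) -> True; (1, 3, 1, 16) -> False
        return all(dim == 1 for dim in shape[:-1])

    filter_hwio = np.random.rand(3, 3, 8, 16)   # [H, W, I, O] filter
    multiplier = np.random.rand(1, 1, 1, 16)    # per-output-channel scale

    if is_degenerate_except_last(multiplier.shape):
        fused_filter = filter_hwio * multiplier  # broadcasts over H, W, I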
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

      [(HasRankOf<1> $add_rhs_value),
       (HasEqualElementSize<[-1], [0]> $conv_out, $add_rhs)], [], (addBenefit -1)>;
    
    // Convert conv+sub+mul pattern to conv+mul+add.
    // (conv - sub) * mul -> conv * mul + (-sub) * mul
    //
    // This is needed to support Conv+BatchNorm pattern from Jax models converted
    // using jax2tf w/o native serialization. Note that Jax2tf patterns always
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
    - Viewed (0)
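The comment in prepare_lifting.td describes rewriting (conv - sub) * mul into conv * mul + (-sub) * mul so that the Conv+BatchNorm pattern produced by jax2tf without native serialization matches the conv+mul+add form. A quick NumPy check of that identity (arbitrary tensors standing in for the real convolution output):

    import numpy as np

    conv = np.random.rand(1, 4, 4, 8)   # stand-in for the convolution output
    sub = np.random.rand(8)             # per-channel offset (e.g. the BatchNorm mean term)
    mul = np.random.rand(8)             # per-channel scale (e.g. the BatchNorm scale term)

    lhs = (conv - sub) * mul            # conv + sub + mul pattern
    rhs = conv * mul + (-sub) * mul     # conv + mul + add pattern

    assert np.allclose(lhs, rhs)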
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

        %conv = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 2, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        return %conv : tensor<*xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  8. test/fixedbugs/bug027.go

    2 222
    3 11
    4 0
    0 44444
    1 3333
    2 222
    3 11
    4 0
    `
    	if r != expect {
    		panic(r)
    	}
    }
    
    /*
    bug027.go:50: illegal types for operand
    	(<Element>I{}) CONV (<I>{})
    bug027.go:50: illegal types for operand
    	(<Element>I{}) CONV (<I>{})
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Feb 17 04:48:57 UTC 2012
    - 1.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

      %conv = "tf.Conv2D"(%dq_input, %dq_weight) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant.mlir

    // CHECK: %[[DEQUANTIZE:.*]] = "quantfork.dcast"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tf.Conv2D"(%arg0, %[[DEQUANTIZE]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithConv2D
    func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
    ^bb0(%arg: tensor<256x32x32x3xf32>) :
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
    - Viewed (0)