Results 81 - 90 of 134 for conv (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

            "MLIR dump file name.">,
        Option<"merge_fusion_with_dequantize_",
            "merge-fusion-with-dequantize",
            "bool", /*default=*/"false",
            "Whether to merge quantized conv/dot_general fusion with subsequent dequantize.">,
      ];
      let dependentDialects = [
        "mlir::arith::ArithDialect",
        "mlir::stablehlo::StablehloDialect",
        "mlir::quant::QuantizationDialect",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        %1 = "tf.PartitionedCall"(%arg0, %cst, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_with_bias_and_relu6_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x2x3x2xf32>, tensor<2xf32>) -> tensor<*xf32> loc(callsite("test@conv"("Conv2D_1") at "QuantizationUnit(\12\08Conv2D_1\1a\04conv)"))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // The result of the conv must be the first input to BiasAdd to be fusable.
      %1 = "tf.BiasAdd"(%arg0, %0) {data_format = "NHWC"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
      %2 = "tf.Elu"(%1) : (tensor<*xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
    - Viewed (0)
  4. src/cmd/asm/internal/asm/testdata/mips64.s

    //	LMOVW ximm ',' rreg
    //	{
    //		outcode(int($1), &$2, 0, &$4);
    //	}
    	MOVW	$1, R1
    	MOVW	$foo(SB), R1
    	MOVV	$1, R1
    	MOVV	$foo(SB), R1
    
    //
    // floating point operate
    //
    //	LFCONV freg ',' freg
    //	{
    //		outcode(int($1), &$2, 0, &$4);
    //	}
    	ABSD	F1, F2
    
    //	LFADD freg ',' freg
    //	{
    //		outcode(int($1), &$2, 0, &$4);
    //	}
    	ADDD	F1, F2
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Aug 08 12:17:12 UTC 2023
    - 12.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc

      return spec;
    }
    
    QuantizationSpec GetDefaultWeightOnlyPtqSpec() {
      QuantizationSpec spec{};
      spec.mutable_matcher()->mutable_function_name()->set_regex(
          "^.*(conv|dot_general).*");
    
      WeightOnlyPtq& weight_only_ptq_spec =
          *spec.mutable_method()->mutable_weight_only_ptq();
      if (auto [iter, inserted] =
              weight_only_ptq_spec.mutable_input_quantized_types()->try_emplace(1);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 8.3K bytes
    - Viewed (0)
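
    The excerpt above sets the matcher for the default weight-only PTQ spec to the
    regular expression "^.*(conv|dot_general).*", i.e. it targets any lifted function
    whose name contains "conv" or "dot_general". As a quick illustration (the
    TensorFlow code itself is C++; this standalone Go sketch only demonstrates what
    the pattern matches, and all but the first sample name are hypothetical):

        package main

        import (
        	"fmt"
        	"regexp"
        )

        func main() {
        	// Same pattern as in GetDefaultWeightOnlyPtqSpec's matcher.
        	re := regexp.MustCompile(`^.*(conv|dot_general).*`)

        	names := []string{
        		"composite_conv2d_with_bias_and_relu6_fn_1", // appears in result 2 above
        		"composite_dot_general_fn_1",                // hypothetical
        		"some_gather_fn",                            // hypothetical, no match
        	}
        	for _, name := range names {
        		fmt.Printf("%-45s matched=%v\n", name, re.MatchString(name))
        	}
        }
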
  6. tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h

    // Checks if the `Method` attached to the given `tf.XlaCallModule` op has
    // `WeightOnlyPtq`.
    bool HasWeightOnlyPtqMethod(TF::XlaCallModuleOp xla_call_module_op);
    
    // Checks if an op is a `tf.XlaCallModule` op, contains 'conv' or 'dot_general'
    // in its name and has `Method` with `WeightOnlyPtq`.
    bool IsWeightOnlyQuantizableOp(const Operation& op);
    
    // Lists the functions in a ModuleOp sorted by their names.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt

    # MLIR:         %[[conv:.*]] = "tfl.conv_2d"(%[[ARG_0]], %[[weight]], %[[bias]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}
    # MLIR:         %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x1x1x186x!quant.uniform<i8:f32, 0.09363494573854933:22>>, tensor<3xi32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)
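
    The type !quant.uniform<i8:f32, 0.09363494573854933:22> in the excerpt above
    stores i8 values with a single scale and zero point. As a rough sketch of what
    those two parameters mean under standard uniform (affine) quantization,
    real = scale * (quantized - zero_point), the following standalone Go snippet
    dequantizes a few i8 values with that scale and zero point (illustration only,
    not code from the test):

        package main

        import "fmt"

        func main() {
        	// Parameters taken from the !quant.uniform<i8:f32, scale:zero_point> type.
        	const scale = 0.09363494573854933
        	const zeroPoint = 22

        	// Standard uniform dequantization: real = scale * (q - zero_point).
        	for _, q := range []int8{-128, 0, 22, 127} {
        		fmt.Printf("q=%4d -> %f\n", q, scale*float64(int(q)-zeroPoint))
        	}
        }
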
  8. test/newinline.go

    // these tests pass with `-gcflags=-l`.
    func float32bits(f float32) uint32 { // ERROR "can inline float32bits"
    	return *(*uint32)(unsafe.Pointer(&f))
    }
    
    // Ensure OCONVNOP is zero cost.
    func Conv(v uint64) uint64 { // ERROR "can inline Conv"
    	return conv2(conv2(conv2(v))) // ERROR "inlining call to (conv1|conv2)"
    }
    func conv2(v uint64) uint64 { // ERROR "can inline conv2"
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 16 20:15:25 UTC 2023
    - 11.2K bytes
    - Viewed (0)
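
    The test above checks that OCONVNOP conversions, i.e. conversions that change
    only the type and not the representation (for example between a named integer
    type and its underlying type), are counted as zero cost by the inliner. A
    minimal sketch of the same idea, with hypothetical names:

        package main

        import "fmt"

        // myUint has uint64 as its underlying type, so converting between the two
        // is a no-op at runtime (an OCONVNOP in the compiler) and adds nothing to
        // a function's inlining cost.
        type myUint uint64

        func double(v uint64) uint64 {
        	return uint64(myUint(v)) * 2
        }

        func main() {
        	fmt.Println(double(21)) // 42
        }
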
  9. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt

    # MLIR:         %[[conv:.*]] = "tfl.conv_2d"(%[[ARG_0]], %[[weight]], %[[bias]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}
    # MLIR:         %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x1x1x186x!quant.uniform<i8:f32, 0.09363494573854933:22>>, tensor<3xi32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)
  10. src/cmd/compile/internal/typecheck/func.go

    		// pick off before type-checking arguments
    		arg, ok := needOneArg(n, "conversion to %v", l.Type())
    		if !ok {
    			n.SetType(nil)
    			return n
    		}
    
    		n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg)
    		n.SetType(l.Type())
    		return tcConv(n)
    	}
    
    	RewriteNonNameCall(n)
    	typecheckargs(n)
    	t := l.Type()
    	if t == nil {
    		n.SetType(nil)
    		return n
    	}
    	types.CheckSize(t)
    
    	switch l.Op() {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Mar 06 15:23:18 UTC 2024
    - 21.1K bytes
    - Viewed (0)
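
    In this snippet the typechecker has discovered that the callee l names a type
    rather than a function, so it rewrites the call-shaped node into an ir.OCONV
    conversion expression and hands it to tcConv. At the source level this is the
    ordinary T(x) conversion syntax, which is parsed like a call; a trivial
    standalone example of the kind of expression that takes this path:

        package main

        import "fmt"

        func main() {
        	i := 65
        	// Written like a call, but "rune" names a type, so the compiler
        	// typechecks rune(i) as a conversion expression, not a function call.
        	r := rune(i)
        	fmt.Println(string(r)) // prints "A"
        }
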