- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 66 for conv4 (0.05 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir
// CHECK: %[[CONV:.*]] = "tf.Conv2D"(%[[SLICED_ARG0]], %[[ARG1]]) // CHECK-SAME: explicit_paddings = [0, 0, 4, 0, 0, 2, 0, 0] // CHECK-SAME: (tensor<128x5x4x64xf32>, tensor<3x2x64x4xf32>) -> tensor<128x4x3x4xf32> // CHECK: return %[[CONV]] : tensor<128x4x3x4xf32> // CHECK: }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 340.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
// Checks if the `Method` attached to the given `tf.XlaCallModule` op has // `WeightOnlyPtq`. bool HasWeightOnlyPtqMethod(TF::XlaCallModuleOp xla_call_module_op); // Checks if an op is a `tf.XlaCallModule` op, contains 'conv' or 'dot_general' // in its name and has `Method` with `WeightOnlyPtq`. bool IsWeightOnlyQuantizableOp(const Operation& op); // Lists the functions in a ModuleOp sorted by their names.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc
return spec; } QuantizationSpec GetDefaultWeightOnlyPtqSpec() { QuantizationSpec spec{}; spec.mutable_matcher()->mutable_function_name()->set_regex( "^.*(conv|dot_general).*"); WeightOnlyPtq& weight_only_ptq_spec = *spec.mutable_method()->mutable_weight_only_ptq(); if (auto [iter, inserted] = weight_only_ptq_spec.mutable_input_quantized_types()->try_emplace(1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 8.3K bytes - Viewed (0) -
src/cmd/internal/goobj/builtinlist.go
{"runtime.slicerunetostring", 1}, {"runtime.stringtoslicebyte", 1}, {"runtime.stringtoslicerune", 1}, {"runtime.slicecopy", 1}, {"runtime.decoderune", 1}, {"runtime.countrunes", 1}, {"runtime.convT", 1}, {"runtime.convTnoptr", 1}, {"runtime.convT16", 1}, {"runtime.convT32", 1}, {"runtime.convT64", 1}, {"runtime.convTstring", 1}, {"runtime.convTslice", 1}, {"runtime.assertE2I", 1},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 21:08:03 UTC 2024 - 7.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt
# MLIR: %[[conv:.*]] = "tfl.conv_2d"(%[[ARG_0]], %[[weight]], %[[bias]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} # MLIR: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x1x1x186x!quant.uniform<i8:f32, 0.09363494573854933:22>>, tensor<3xi32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt
# MLIR: %[[conv:.*]] = "tfl.conv_2d"(%[[ARG_0]], %[[weight]], %[[bias]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} # MLIR: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x1x1x186x!quant.uniform<i8:f32, 0.09363494573854933:22>>, tensor<3xi32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc
StringRef function_name = entry_function.getValue(); if (!function_name.starts_with("composite_")) { return spec; } if (function_name.contains("conv")) { // Looks up `Method` to see if it should be per-channel quantized and // populates the spec accordingly. PopulateCoeffOpQuantDimIfPerChannelQuantized(call_op, *spec);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/config_test.cc
ASSERT_THAT(new_config.specs().specs(), SizeIs(1)); const QuantizationSpec& spec = new_config.specs().specs(0); EXPECT_THAT(spec.matcher().function_name().regex(), StrEq("^.*(conv|dot_general).*")); EXPECT_TRUE(spec.method().has_weight_only_ptq()); const WeightOnlyPtq& weight_only_ptq_spec = spec.method().weight_only_ptq(); EXPECT_THAT(weight_only_ptq_spec.input_quantized_types(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 06:59:34 UTC 2024 - 12K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
const auto module_op = op->getParentOfType<ModuleOp>(); const SymbolTable symbol_table(module_op); func::FuncOp func = symbol_table.lookup<func::FuncOp>(function_name); if (function_name.contains("conv")) { return (*(func.getOps<mlir::stablehlo::ConvolutionOp>().begin())) .getDimensionNumbers() .getKernelOutputFeatureDimension(); } else if (function_name.contains("dot_general")) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/post-quantize.mlir
// CHECK-NEXT: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[q_cst_0]], %[[q_cst_1]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32}> // CHECK-NEXT: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[cst]]) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 19.9K bytes - Viewed (0)