- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 12 for CONV (0.03 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
// CHECK: } // CHECK-LABEL: private @composite_conv_with_bias_dynamic_fn_1 // CHECK: %[[CONV:.*]] = stablehlo.convolution(%arg0, %arg1) // CHECK: %[[SHAPE_OF:.*]] = shape.shape_of %[[CONV]] // CHECK: %[[DYNAMIC_BROADCAST_IN_DIM:.*]] = stablehlo.dynamic_broadcast_in_dim %arg2, %[[SHAPE_OF]] // CHECK: %[[ADD:.*]] = stablehlo.add %[[CONV]], %[[DYNAMIC_BROADCAST_IN_DIM]] // CHECK: return %[[ADD]] : tensor<?x28x28x16xf32> // CHECK: }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
%conv = "tfl.conv_2d"(%input, %weight, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32> func.return %conv : tensor<1x112x112x32xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0) -
platforms/core-configuration/model-core/src/test/groovy/org/gradle/internal/instantiation/generator/AsmBackedClassGeneratorTest.java
Convention conv = new ExtensibleDynamicObject(this, DynamicObjectAwareBean.class, TestUtil.instantiatorFactory().decorateLenient()).getConvention(); public Convention getConvention() { return conv; } public ExtensionContainer getExtensions() { return conv; }
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Oct 05 19:36:14 UTC 2023 - 74.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
# One pure conv out = nn_ops.conv2d( out, self.conv_filters, strides=(1, 1, 2, 1), dilations=(1, 1, 1, 1), padding='SAME', data_format='NHWC', ) # One fakequant attached conv if is_qat_model: out = array_ops.fake_quant_with_min_max_args(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
src/cmd/cgo/gcc.go
conv.getTypeIDs[n.Go[:len(n.Go)-9]] = true } } for i, n := range names { if types[i] == nil { continue } pos := f.NamePos[n] f, fok := types[i].(*dwarf.FuncType) if n.Kind != "type" && fok { n.Kind = "func" n.FuncType = conv.FuncType(f, pos) } else { n.Type = conv.Type(types[i], pos) switch n.Kind { case "iconst":
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 15:50:06 UTC 2024 - 97K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir
// CHECK: %[[PAD:.*]] = "tf.PadV2"({{.*}}, %[[CONST]], %[[CONST_1]]) // CHECK: %[[CONV:.*]] = "tf.XlaConvV2"(%[[PAD]], %[[WEIGHT]] // CHECK-SAME: (tensor<1x4x5x5x3xi8>, tensor<2x3x3x3x2xi8>, tensor<3xi32>, tensor<3x2xi32>, tensor<3xi32>, tensor<3xi32>, tensor<i32>) -> tensor<1x3x2x3x2xi32> // CHECK: %[[SUB:.*]] = "tf.Sub"(%[[CONV]], %[[CONST_2]]) } // ----- module attributes {tf_saved_model.semantics} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 81K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
auto pad_output_type = UnrankedTensorType::get(elem_type); input = rewriter.create<TF::PadOp>(op->getLoc(), pad_output_type, input, padding_const); // Set Conv padding to `VALID` since padding has been handled by Pad op. state.padding = rewriter.getStringAttr("VALID"); } auto conv_op = static_cast<const ConcreteType *>(this)->createTFLOp(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} { func.func @conv(tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>, tensor<256x3x32x32xf32>) -> (tensor<256x8x7x16xf32>, tensor<256x16x32x32xf32>, tensor<256x8x6x16xf32>, tensor<256x32x32x16xf32>, tensor<256x32x32x16xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
ConstBoolAttrTrue, $asymmetric_quantize_inputs), [(HasRank<2> $input), (AreLastTwoDimsTransposed $perm_value), (IsBoolAttrEqual<"false"> $adj_y)]>; // Replace conv-->transpose-->add with conv-->add-->transpose // The bias needs only reshape (i.e. ReshapeNCHWBiasToNHWC) and not transpose // because the bias's shape simply changes from NxCx1x1 to Nx1x1xC. def ReorderNCHWTransposeAdd : Pat <
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
{"BN", {"FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3", "_FusedBatchNormEx", "FusedBatchNormGrad", "FusedBatchNormGradV2", "FusedBatchNormGradV3"}}, {"Conv", {"_FusedConv2D"}}, {"SORT", {"TopKV2"}}, // XLA version much faster then TF version. {"MISC", // clang-format off {"ApproxTopK", "BroadcastTo", "ExpandDims", "Fill", "NoOp",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0)