- Sort Score
- Results per page: 10
- Languages All
Results 81 - 90 of 118 for conv4 (0.09 sec)
-
tensorflow/compiler/mlir/lite/tests/post-quantize.mlir
// CHECK-NEXT: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[q_cst_0]], %[[q_cst_1]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32}> // CHECK-NEXT: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[cst]]) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 19.9K bytes - Viewed (0) -
src/cmd/compile/internal/walk/order.go
ir.ORECOVERFP, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES: if isRuneCount(n) { // len([]rune(s)) is rewritten to runtime.countrunes(s) later. conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr) conv.X = o.expr(conv.X, nil) } else { o.call(n) } if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting { return o.copyExpr(n) } return n
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 08 02:00:33 UTC 2024 - 42.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
input_shape=(1, 3, 4, 3), filter_shape=(2, 3, 3, 2) ) signatures = { 'serving_default': model.conv.get_concrete_function(), } save_opts = save_options.SaveOptions( function_aliases={'conv_func': model.conv} ) saved_model_save.save( model, self._input_saved_model_path, signatures, save_opts )
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
// FakeQuant). bool disable_per_channel = false; // Whether to disable per-channel weight quantization and enable legacy per // tensor quantization. The legacy quantization for Dense layers is // inconsistent with Conv 1x1 which always performs per channel quantization. bool disable_per_channel_for_dense_layers = false; // Whether to use fixed output ranges of the activation ops (tanh, sigmoid, // etc.) and not infer weight constants.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
src/net/http/transfer.go
func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool { if major < 1 { return true } conv := header["Connection"] hasClose := httpguts.HeaderValuesContainsToken(conv, "close") if major == 1 && minor == 0 { return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive") } if hasClose && removeCloseHeader { header.Del("Connection") } return hasClose }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 21 22:14:00 UTC 2024 - 31.1K bytes - Viewed (0) -
src/cmd/compile/internal/walk/range.go
} // hv2 := rune(ha[hv1]) nind := ir.NewIndexExpr(base.Pos, ha, hv1) nind.SetBounded(true) body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType))) // if hv2 < utf8.RuneSelf nif := ir.NewIfStmt(base.Pos, nil, nil, nil) nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(base.Pos, utf8.RuneSelf)) // hv1++
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 20 14:52:33 UTC 2023 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size()); // Make sure the graph only has one Conv operation. ASSERT_EQ(quantized_graph->operators()->size(), 1); const auto op = quantized_graph->operators()->Get(0); const uint32_t op_code_idx = op->opcode_index();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
src/cmd/cgo/gcc.go
conv.getTypeIDs[n.Go[:len(n.Go)-9]] = true } } for i, n := range names { if types[i] == nil { continue } pos := f.NamePos[n] f, fok := types[i].(*dwarf.FuncType) if n.Kind != "type" && fok { n.Kind = "func" n.FuncType = conv.FuncType(f, pos) } else { n.Type = conv.Type(types[i], pos) switch n.Kind { case "iconst":
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 15:50:06 UTC 2024 - 97K bytes - Viewed (0) -
platforms/software/dependency-management/src/integTest/groovy/org/gradle/integtests/resolve/versions/VersionConflictResolutionIntegrationTest.groovy
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu May 09 11:33:46 UTC 2024 - 76.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
type = mlir::cast<ShapedType>(cst.getType()); } else { result.can_compress = false; return result; } // Currently we only support compressing weights of ops: // Conv, DepthwiseConv, TransposeConv, whose filter has rank 4, and // FullyConnected, whose filter has rank 2. if (type.getRank() != 2 && type.getRank() != 4) { result.can_compress = false; return result; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.1K bytes - Viewed (0)