Results 91 - 100 of 126 for conv4 (0.05 sec)

  1. src/net/http/transfer.go

    func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
    	if major < 1 {
    		return true
    	}
    
    	conv := header["Connection"]
    	hasClose := httpguts.HeaderValuesContainsToken(conv, "close")
    	if major == 1 && minor == 0 {
    		return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive")
    	}
    
    	if hasClose && removeCloseHeader {
    		header.Del("Connection")
    	}
    
    	return hasClose
    }
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Mar 21 22:14:00 UTC 2024
    - 31.1K bytes
    - Viewed (0)
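
    The excerpt above is the complete rule net/http applies when deciding whether a message ends the connection. As a stand-alone illustration, here is a minimal Go sketch of that rule; the helper name keepAlive and the comma-split header handling are assumptions for the example, not the unexported net/http function:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // keepAlive restates the decision in shouldClose, inverted: anything
    // below HTTP/1.0 always closes; HTTP/1.0 stays open only when the
    // Connection header asks for keep-alive (and not close); HTTP/1.1 and
    // later stay open unless the header asks for close.
    func keepAlive(major, minor int, connection string) bool {
    	hasToken := func(tok string) bool {
    		for _, v := range strings.Split(connection, ",") {
    			if strings.EqualFold(strings.TrimSpace(v), tok) {
    				return true
    			}
    		}
    		return false
    	}
    	if major < 1 {
    		return false
    	}
    	if major == 1 && minor == 0 {
    		return hasToken("keep-alive") && !hasToken("close")
    	}
    	return !hasToken("close")
    }

    func main() {
    	fmt.Println(keepAlive(1, 0, ""))           // false: HTTP/1.0 defaults to close
    	fmt.Println(keepAlive(1, 0, "keep-alive")) // true
    	fmt.Println(keepAlive(1, 1, ""))           // true: HTTP/1.1 defaults to keep-alive
    	fmt.Println(keepAlive(1, 1, "close"))      // false
    }
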
  2. src/cmd/compile/internal/walk/range.go

    		}
    
    		// hv2 := rune(ha[hv1])
    		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
    		nind.SetBounded(true)
    		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))
    
    		// if hv2 < utf8.RuneSelf
    		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
    		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(base.Pos, utf8.RuneSelf))
    
    		// hv1++
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Sep 20 14:52:33 UTC 2023
    - 17.6K bytes
    - Viewed (0)
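
    The comments in this excerpt describe the loop the compiler generates for `for i, r := range s` over a string: index the byte, take a fast path when it is below utf8.RuneSelf (plain ASCII), and only otherwise call the UTF-8 decoder. A source-level sketch of an equivalent loop (the function and names here are illustrative, not what the compiler emits):

    package main

    import (
    	"fmt"
    	"unicode/utf8"
    )

    // rangeString walks s the way the lowered range loop does: single-byte
    // runes advance by one, multi-byte runes go through DecodeRuneInString.
    func rangeString(s string, yield func(i int, r rune)) {
    	for i := 0; i < len(s); {
    		r := rune(s[i])
    		if r < utf8.RuneSelf {
    			yield(i, r) // fast path: the byte is the rune
    			i++
    			continue
    		}
    		r, size := utf8.DecodeRuneInString(s[i:])
    		yield(i, r)
    		i += size
    	}
    }

    func main() {
    	rangeString("héllo", func(i int, r rune) {
    		fmt.Printf("%d: %q\n", i, r) // é occupies two bytes, so the index jumps from 1 to 3
    	})
    }
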
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
        ASSERT_EQ(quantized_graph->tensors()->size(),
                  float_graph->tensors()->size());
        // Make sure the graph only has one Conv operation.
        ASSERT_EQ(quantized_graph->operators()->size(), 1);
        const auto op = quantized_graph->operators()->Get(0);
        const uint32_t op_code_idx = op->opcode_index();
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  4. src/cmd/cgo/gcc.go

    			conv.getTypeIDs[n.Go[:len(n.Go)-9]] = true
    		}
    	}
    	for i, n := range names {
    		if types[i] == nil {
    			continue
    		}
    		pos := f.NamePos[n]
    		f, fok := types[i].(*dwarf.FuncType)
    		if n.Kind != "type" && fok {
    			n.Kind = "func"
    			n.FuncType = conv.FuncType(f, pos)
    		} else {
    			n.Type = conv.Type(types[i], pos)
    			switch n.Kind {
    			case "iconst":
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 20 15:50:06 UTC 2024
    - 97K bytes
    - Viewed (0)
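
    In this excerpt, conv is cgo's DWARF-to-Go type converter: a name that resolves to a C function gets Kind "func" and a Go function type from conv.FuncType, which is what lets Go code call it through the C pseudo-package. A minimal cgo sketch of the effect; the C function square is made up for illustration (build with cgo enabled):

    package main

    /*
    static int square(int x) { return x * x; }
    */
    import "C"

    import "fmt"

    func main() {
    	// cgo derives a Go signature for C.square from the C declaration's
    	// type information, so the call below type-checks like a Go call.
    	fmt.Println(C.square(7)) // 49
    }
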
  5. platforms/software/dependency-management/src/integTest/groovy/org/gradle/integtests/resolve/versions/VersionConflictResolutionIntegrationTest.groovy

                    conf
                    conf2
                    conf3
                    conf4
                }
                dependencies {
                    conf 'org:a:1.0', 'org:b:1.0'
                    conf2 'org:a:1.0', 'org:c:1.0'
                    conf3 'org:b:1.0', 'org:c:1.0'
                    conf4 'org:b:1.0', 'org:c:1.0', 'org:d:1.0'
                }
                task checkDeps {
    - Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu May 09 11:33:46 UTC 2024
    - 76.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc

        type = mlir::cast<ShapedType>(cst.getType());
      } else {
        result.can_compress = false;
        return result;
      }
    
      // Currently we only support compressing weights of ops:
      //   Conv, DepthwiseConv, TransposeConv, whose filter has rank 4, and
      //   FullyConnected, whose filter has rank 2.
      if (type.getRank() != 2 && type.getRank() != 4) {
        result.can_compress = false;
        return result;
      }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 16.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

    // CHECK: %[[PAD:.*]] = "tf.PadV2"({{.*}}, %[[CONST]], %[[CONST_1]])
    // CHECK: %[[CONV:.*]] = "tf.XlaConvV2"(%[[PAD]], %[[WEIGHT]]
    // CHECK-SAME: (tensor<1x4x5x5x3xi8>, tensor<2x3x3x3x2xi8>, tensor<3xi32>, tensor<3x2xi32>, tensor<3xi32>, tensor<3xi32>, tensor<i32>) -> tensor<1x3x2x3x2xi32>
    // CHECK: %[[SUB:.*]] = "tf.Sub"(%[[CONV]], %[[CONST_2]])
    }
    
    // -----
    
    module attributes {tf_saved_model.semantics} {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
    - Viewed (0)
  8. src/cmd/compile/internal/typecheck/builtin.go

    	{"slicerunetostring", funcTag, 48},
    	{"stringtoslicebyte", funcTag, 50},
    	{"stringtoslicerune", funcTag, 53},
    	{"slicecopy", funcTag, 54},
    	{"decoderune", funcTag, 55},
    	{"countrunes", funcTag, 56},
    	{"convT", funcTag, 57},
    	{"convTnoptr", funcTag, 57},
    	{"convT16", funcTag, 59},
    	{"convT32", funcTag, 61},
    	{"convT64", funcTag, 62},
    	{"convTstring", funcTag, 63},
    	{"convTslice", funcTag, 66},
    	{"assertE2I", funcTag, 67},
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 21:08:03 UTC 2024
    - 16.2K bytes
    - Viewed (0)
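
    These builtin.go entries register the runtime helpers the compiler calls when a concrete value is converted to an interface: sized variants (convT16, convT32, convT64, convTstring, convTslice) for common shapes and convT/convTnoptr for the general case. The sketch below shows conversions of those shapes; which helper (if any) a given line compiles to is an assumption matched by name only, and small constant values can be boxed without a runtime call at all:

    package main

    import "fmt"

    func main() {
    	var i interface{}

    	i = uint16(7)                // 16-bit value: convT16-shaped conversion
    	i = uint32(7)                // convT32
    	i = uint64(7)                // convT64
    	i = "hello"                  // convTstring
    	i = []byte("x")              // convTslice
    	i = struct{ a, b int }{1, 2} // general case: convT / convTnoptr

    	fmt.Println(i) // {1 2}
    }
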
  9. src/cmd/compile/internal/ssagen/ssa.go

    				conv = conv1
    			}
    		}
    		if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
    			if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
    				conv = conv1
    			}
    		}
    
    		if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
    			if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 10 19:44:43 UTC 2024
    - 284.9K bytes
    - Viewed (0)
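
    Per the excerpt, uint64fpConvOpToSSA supplies dedicated conversion ops for uint64 <-> floating point on ARM64, Wasm, S390X, and softfloat builds, because a uint64 above the int64 range cannot simply reuse the signed conversion. Plain Go showing the conversions in question:

    package main

    import "fmt"

    func main() {
    	u := uint64(1) << 63 // larger than max int64, so the signed path alone is not enough
    	f := float64(u)      // uint64 -> float64
    	fmt.Println(f)         // 9.223372036854776e+18
    	fmt.Println(uint64(f)) // float64 -> uint64: 9223372036854775808
    }
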
  10. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

                input_tensor,
                self.filters,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format='NHWC',
                name='sample/conv',
            )
            if bias_fn is not None:
              out = nn_ops.bias_add(out, self.bias)
            if has_batch_norm:
              # Fusing is supported for non-training case.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)