Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 41 - 50 of 54 for bigN (0.22 sec)

  1. pkg/proxy/iptables/proxier.go

    	// is just for efficiency, not correctness.
    	args := make([]string, 64)
    
    	// Compute total number of endpoint chains across all services
    	// to get a sense of how big the cluster is.
    	totalEndpoints := 0
    	for svcName := range proxier.svcPortMap {
    		totalEndpoints += len(proxier.endpointsMap[svcName])
    	}
    	proxier.largeClusterMode = (totalEndpoints > largeClusterEndpointsThreshold)
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue May 21 14:39:54 UTC 2024
    - 65.1K bytes
    - Viewed (0)
  2. src/html/template/exec_test.go

    	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
    	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
    	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
    	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
    	{"ideal nil without type", "{{nil}}", "", 0, false},
    
    	// Fields of structs.
    	{".X", "-{{.X}}-", "-x-", tVal, true},
    	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat Feb 24 21:59:12 UTC 2024
    - 57.6K bytes
    - Viewed (0)
  3. src/index/suffixarray/sais2.go

    	//
    	// For short inputs, the averages may not run in our favor, but then we
    	// can often fall back to using the length-512 tmp available in the
    	// top-most call. (Also a short allocation would not be a big deal.)
    	//
    	// For pathological inputs, we fall back to allocating a new tmp of length
    	// max(maxID, numLMS/2). This level of the recursion needs maxID,
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 18 23:57:18 UTC 2024
    - 52.3K bytes
    - Viewed (0)
  4. src/runtime/mgcscavenge.go

    // or that is min pages or greater in size but not aligned to min. min must be
    // a non-zero power of 2 <= maxPagesPerPhysPage.
    //
    // max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
    // findScavengeCandidate effectively returns entire free and unscavenged regions.
    // If max < pallocChunkPages, it may truncate the returned region such that size is
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/mark_for_compilation_pass.cc

      for (Node* node : graph_->nodes()) {
        if (!IsCompilationCandidate(node)) {
          cluster_for_node_[node->id()].Get() = nullptr;
          continue;
        }
    
        // We want clusters to be big enough that the benefit from XLA's
        // optimizations offsets XLA related overhead (for instance we add some
        // Switch/Merge nodes into the graph to implement lazy compilation).  To
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
  6. src/text/template/exec_test.go

    	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
    	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
    	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
    	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
    	{"ideal nil without type", "{{nil}}", "", 0, false},
    
    	// Fields of structs.
    	{".X", "-{{.X}}-", "-x-", tVal, true},
    	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 24 22:23:55 UTC 2024
    - 60.1K bytes
    - Viewed (0)
  7. src/runtime/mbitmap.go

    // ordering needs to match the same byte ordering the compiler would emit. The compiler always
    // emits the bitmap data in little endian byte ordering, so on big endian platforms these
    // uintptrs will have their byte orders swapped from what they normally would be.
    //
    // heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
    //
    //go:nosplit
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
  8. cmd/erasure-server-pool.go

    		//
    		// This happens repeatedly for all objects that are created concurrently() avoiding this
    		// as a List() call is an important performance improvement.
    		//
    		// Spark based s3a committers are a  big enough use-case to have this optimization.
    		//
    		// A sample code to see the improvements is as follows, this sample code is
    		// simply a read on JSON from MinIO and write it back as "parquet".
    		//
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Thu May 30 11:58:12 UTC 2024
    - 82.5K bytes
    - Viewed (0)
  9. pkg/controller/job/job_controller.go

    			// We stop counting pods and removing finalizers here to:
    			// 1. Ensure that the UIDs representation are under 20 KB.
    			// 2. Cap the number of finalizer removals so that syncing of big Jobs
    			//    doesn't starve smaller ones.
    			//
    			// The job will be synced again because the Job status and Pod updates
    			// will put the Job back to the work queue.
    			reachedMaxUncountedPods = true
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon Jun 10 23:56:37 UTC 2024
    - 77.6K bytes
    - Viewed (0)
  10. src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go

    	// encoding/binary helpers to write the bytes without worrying
    	// about the ordering.
    	binary.BigEndian.PutUint32(sa.raw[2:6], px_proto_oe)
    	// This field is deliberately big-endian, unlike the previous
    	// one. The kernel expects SID to be in network byte order.
    	binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
    	copy(sa.raw[8:14], sa.Remote)
    	for i := 14; i < 14+IFNAMSIZ; i++ {
    		sa.raw[i] = 0
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Mar 07 05:26:45 UTC 2024
    - 77.5K bytes
    - Viewed (0)
Back to top