Results 1 - 10 of 17 for Deallocate (0.3 sec)

  1. src/runtime/proc.go

    		// Deallocate old stack. We kept it in gfput because it was the
    		// right size when the goroutine was put on the free list, but
    		// the right size has changed since then.
    		systemstack(func() {
    			stackfree(gp.stack)
    			gp.stack.lo = 0
    			gp.stack.hi = 0
    			gp.stackguard0 = 0
    		})
    	}
    	if gp.stack.lo == 0 {
    		// Stack was deallocated in gfput or just above. Allocate a new one.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
    - Viewed (0)
  2. pkg/kubelet/cm/memorymanager/policy_static_test.go

    			for i := range testCase.pod.Spec.InitContainers {
    				err = p.Allocate(s, testCase.pod, &testCase.pod.Spec.InitContainers[i])
    				if !reflect.DeepEqual(err, testCase.expectedError) {
    					t.Fatalf("The actual error %v is different from the expected one %v", err, testCase.expectedError)
    				}
    			}
    
    			for i := range testCase.pod.Spec.Containers {
    				err = p.Allocate(s, testCase.pod, &testCase.pod.Spec.Containers[i])
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Sun Sep 17 05:49:15 UTC 2023
    - 100.4K bytes
    - Viewed (0)
  3. cluster/gce/gci/configure-helper.sh

        params+=("--concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}")
      fi
      if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
        params+=("--allocate-node-cidrs=true")
      elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
        params+=("--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}")
      fi
      if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
        params+=("--terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}")
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon Jun 10 22:07:47 UTC 2024
    - 141.1K bytes
    - Viewed (0)
  4. staging/src/k8s.io/apimachinery/pkg/util/managedfields/internal/testdata/swagger.json

            },
            "stdin": {
              "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
              "type": "boolean"
            },
            "stdinOnce": {
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Feb 20 15:45:02 UTC 2024
    - 229.4K bytes
    - Viewed (0)
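    The `stdin` field described in this result is a plain boolean on the Kubernetes Container spec, alongside `stdinOnce`. A minimal, hypothetical sketch of setting it with the k8s.io/api types (container name and image are made up):

        package main

        import (
            "fmt"

            corev1 "k8s.io/api/core/v1"
        )

        func main() {
            // Stdin asks the runtime to allocate a buffer for the container's
            // stdin; if it is left false, reads from stdin return EOF.
            c := corev1.Container{
                Name:      "shell",
                Image:     "busybox",
                Stdin:     true,
                StdinOnce: true,
            }
            fmt.Printf("stdin=%v stdinOnce=%v\n", c.Stdin, c.StdinOnce)
        }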
  5. src/reflect/value.go

    	}
    	nout := t.NumOut()
    
    	// Register argument space.
    	var regArgs abi.RegArgs
    
    	// Compute frame type.
    	frametype, framePool, abid := funcLayout(t, rcvrtype)
    
    	// Allocate a chunk of memory for frame if needed.
    	var stackArgs unsafe.Pointer
    	if frametype.Size() != 0 {
    		if nout == 0 {
    			stackArgs = framePool.Get().(unsafe.Pointer)
    		} else {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 119.9K bytes
    - Viewed (0)
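    The `framePool.Get().(unsafe.Pointer)` call in this snippet is a sync.Pool lookup: when the function has no results (`nout == 0`), reflect reuses a frame-sized buffer from the pool rather than allocating a new one per call. A rough, free-standing sketch of that pooling pattern (buffer size and names are hypothetical):

        package main

        import (
            "fmt"
            "sync"
        )

        // framePool hands out reusable buffers so repeated calls can skip a
        // fresh heap allocation whenever an earlier buffer has been returned.
        var framePool = sync.Pool{
            New: func() any { return make([]byte, 128) },
        }

        func main() {
            buf := framePool.Get().([]byte) // idle buffer if available, else New()
            defer framePool.Put(buf)        // hand it back for the next caller
            fmt.Println("frame buffer of", len(buf), "bytes")
        }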
  6. src/cmd/link/internal/ld/data.go

    }
    
    // allocateDataSections allocates sym.Section objects for data/rodata
    // (and related) symbols, and then assigns symbols to those sections.
    func (state *dodataState) allocateDataSections(ctxt *Link) {
    	// Allocate sections.
    	// Data is processed before segtext, because we need
    	// to see all symbols in the .data and .bss sections in order
    	// to generate garbage collection information.
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jun 12 15:10:50 UTC 2024
    - 100.5K bytes
    - Viewed (0)
  7. src/reflect/all_test.go

    	}
    
    	got := int(testing.AllocsPerRun(10, func() {
    		iter := v.MapRange()
    		for iter.Next() {
    			k.SetIterKey(iter)
    			e.SetIterValue(iter)
    		}
    	}))
    	// Calling MapRange should not allocate even though it returns a *MapIter.
    	// The function is inlineable, so if the local usage does not escape
    	// the *MapIter, it can remain stack allocated.
    	want := 0
    	if got != want {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 218.8K bytes
    - Viewed (0)
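    The comment in this snippet hinges on MapRange being inlineable: when the returned *MapIter does not escape, it stays on the stack and the iteration loop performs no heap allocation. A self-contained sketch of the same check (test name and map contents are invented):

        package example

        import (
            "reflect"
            "testing"
        )

        func TestMapRangeDoesNotAllocate(t *testing.T) {
            v := reflect.ValueOf(map[string]int{"a": 1, "b": 2})
            k := reflect.New(v.Type().Key()).Elem()
            e := reflect.New(v.Type().Elem()).Elem()

            got := int(testing.AllocsPerRun(10, func() {
                iter := v.MapRange() // inlineable; *MapIter can stay stack-allocated
                for iter.Next() {
                    k.SetIterKey(iter)   // copy the key into preallocated k
                    e.SetIterValue(iter) // copy the value into preallocated e
                }
            }))
            if got != 0 {
                t.Errorf("allocations per run = %d, want 0", got)
            }
        }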
  8. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

        a single value from first replica.
    
        The benefit of this optimization is reduced memory requirement on host. For
        multiple writes (one from each replica) to such variables, the host would
        allocate buffer space to receive the device output from all replicas, which is
        not required. We can use the output of first replica in such cases.
      }];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
    - Viewed (0)
  9. cmd/bucket-replication.go

    	}
    
    	resyncStatus := ResyncFailed
    	defer func() {
    		s.markStatus(resyncStatus, opts, objectAPI)
    		globalSiteResyncMetrics.incBucket(opts, resyncStatus)
    		s.workerCh <- struct{}{}
    	}()
    	// Allocate new results channel to receive ObjectInfo.
    	objInfoCh := make(chan itemOrErr[ObjectInfo])
    	cfg, err := getReplicationConfig(ctx, opts.bucket)
    	if err != nil {
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Thu Jun 13 06:56:12 UTC 2024
    - 114.4K bytes
    - Viewed (0)
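    The deferred closure in this snippet is a common Go idiom: assume failure up front, let the defer publish whatever the status variable holds when the function returns, and only flip it to success at the very end. A stripped-down sketch of that shape (all names hypothetical):

        package main

        import "fmt"

        func resyncBucket() error {
            status := "failed" // pessimistic default
            defer func() {
                // Runs on every return path, so early failures still get reported.
                fmt.Println("resync finished with status:", status)
            }()

            // ... perform the resync work; returning early leaves status "failed" ...

            status = "completed"
            return nil
        }

        func main() { _ = resyncBucket() }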
  10. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

      // Hold current ND index in input tensor when computing
      // permutation.
      llvm::OwningArrayRef<uint64_t> current_input_index(
          input_tensor.getType().getRank());
    
      // Allocate raw data and retrieve address of the first char in its raw
      // buffer.
      llvm::OwningArrayRef<char> raw_output_arr(input_tensor.getRawData());
      char* raw_output = (char*)raw_output_arr.data();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)