Results 21 - 30 of 88 for fallocate (0.37 sec)

  1. src/cmd/vendor/golang.org/x/sys/windows/security_windows.go

    	src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen)
    	// SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to
    	// be aligned properly. When we're copying a Windows-allocated struct to a
    	// Go-allocated one, make sure that the Go allocation is aligned to the
    	// pointer size.
    	const psize = int(unsafe.Sizeof(uintptr(0)))
    	alloc := make([]uintptr, (sdLen+psize-1)/psize)
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 04 16:19:04 UTC 2024
    - 52.5K bytes
    - Viewed (0)
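
    A minimal, self-contained sketch of the same alignment trick shown in this snippet (the helper name alignedCopy is illustrative, not code from security_windows.go): backing the byte copy with a []uintptr allocation guarantees the Go-side memory is pointer-aligned, which is what keeps checkptr satisfied.

        package main

        import (
            "fmt"
            "unsafe"
        )

        // alignedCopy copies src into freshly allocated Go memory whose backing
        // array is a []uintptr, so the copy is guaranteed to be aligned to the
        // pointer size.
        func alignedCopy(src []byte) []byte {
            if len(src) == 0 {
                return nil
            }
            const psize = int(unsafe.Sizeof(uintptr(0)))
            words := make([]uintptr, (len(src)+psize-1)/psize) // round up to whole words
            dst := unsafe.Slice((*byte)(unsafe.Pointer(&words[0])), len(src))
            copy(dst, src)
            return dst
        }

        func main() {
            b := alignedCopy([]byte("security descriptor bytes"))
            aligned := uintptr(unsafe.Pointer(&b[0]))%unsafe.Sizeof(uintptr(0)) == 0
            fmt.Println(len(b), aligned) // 25 true
        }
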
  2. src/runtime/asm_amd64.s

    	MOVQ	SP, SI;				\
    	ADDQ	BX, DI;				\
    	ADDQ	BX, SI;				\
    	SUBQ	BX, CX;				\
    	CALL	callRet<>(SB);			\
    	RET
    
    // callRet copies return values back at the end of call*. This is a
    // separate function so it can allocate stack space for the arguments
    // to reflectcallmove. It does not follow the Go ABI; it expects its
    // arguments in registers.
    TEXT callRet<>(SB), NOSPLIT, $40-0
    	NO_LOCAL_POINTERS
    	MOVQ	DX, 0(SP)
    	MOVQ	DI, 8(SP)
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 20:38:24 UTC 2024
    - 60.4K bytes
    - Viewed (0)
  3. src/cmd/link/internal/ld/dwarf.go

    				// that stores the return address to the stack frame is not the
    				// same one that allocates the frame.
    				if pcsp.Value > 0 {
    					// The return address is preserved at (CFA-frame_size)
    					// after a stack frame has been allocated.
    					deltaBuf = append(deltaBuf, dwarf.DW_CFA_offset_extended_sf)
    					deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr))
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 02 16:25:18 UTC 2024
    - 72.4K bytes
    - Viewed (0)
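
    For context on the AppendUleb128 call in this snippet: DWARF encodes register numbers and most operands as LEB128 variable-length integers. Below is a minimal sketch of the unsigned variant; appendUleb128 is an illustrative stand-in, not the linker's own implementation.

        package main

        import "fmt"

        // appendUleb128 appends the unsigned LEB128 encoding of v to b:
        // seven bits per byte, least-significant group first, with the high
        // bit set on every byte except the last.
        func appendUleb128(b []byte, v uint64) []byte {
            for {
                c := byte(v & 0x7f)
                v >>= 7
                if v != 0 {
                    c |= 0x80 // more bytes follow
                }
                b = append(b, c)
                if v == 0 {
                    return b
                }
            }
        }

        func main() {
            // The classic LEB128 example: 624485 encodes as e5 8e 26.
            fmt.Printf("% x\n", appendUleb128(nil, 624485))
        }
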
  4. src/syscall/syscall_windows.go

    // Only a limited number of callbacks may be created in a single Go process, and any memory allocated
    // for these callbacks is never released.
    // Between NewCallback and NewCallbackCDecl, at least 1024 callbacks can always be created.
    func NewCallback(fn any) uintptr {
    	return compileCallback(fn, true)
    }
    
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Mar 21 11:49:46 UTC 2024
    - 52.7K bytes
    - Viewed (0)
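
    Because callback memory is never released, the usual pattern is to call NewCallback once (for example, in a package-level var) and reuse the returned value. The following is a minimal, Windows-only usage sketch that assumes user32's EnumWindows as the C-side caller; it is an illustration, not code from syscall_windows.go.

        //go:build windows

        package main

        import (
            "fmt"
            "syscall"
        )

        var (
            user32      = syscall.NewLazyDLL("user32.dll")
            enumWindows = user32.NewProc("EnumWindows")

            // Created once and reused: each NewCallback consumes a slot that is
            // never released for the life of the process.
            enumCallback = syscall.NewCallback(func(hwnd, lparam uintptr) uintptr {
                fmt.Printf("window handle: %#x\n", hwnd)
                return 1 // non-zero tells EnumWindows to keep enumerating
            })
        )

        func main() {
            enumWindows.Call(enumCallback, 0)
        }
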
  5. src/cmd/link/internal/ld/xcoff.go

    	C_DWARF   = 112 // DWARF symbol
    	C_GSYM    = 128 // Global variable
    	C_LSYM    = 129 // Automatic variable allocated on stack
    	C_PSYM    = 130 // Argument to subroutine allocated on stack
    	C_RSYM    = 131 // Register variable
    	C_RPSYM   = 132 // Argument to function or procedure stored in register
    	C_STSYM   = 133 // Statically allocated symbol
    	C_BCOMM   = 135 // Beginning of common block
    	C_ECOML   = 136 // Local member of common block
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Dec 01 19:58:23 UTC 2023
    - 51.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc

        //   executed.
        // Both cases are handled by the condition below.
        if (info.is_written && !info.is_written_all) info.is_read = true;
    
        // Allocate a result index for written resources that don't have one.
        if (info.is_written) {
          written_resources_.insert(resource);
          if (!info.IsResultIndexAssigned()) info.result_index = num_new_results_++;
        }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 55.1K bytes
    - Viewed (0)
  7. pkg/kubelet/kuberuntime/kuberuntime_manager.go

    		containerStatus := podStatus.FindContainerStatusByName(container.Name)
    
    		// Call internal container post-stop lifecycle hook for any non-running container so that any
    		// allocated cpus are released immediately. If the container is restarted, cpus will be re-allocated
    		// to it.
    		if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed May 22 02:01:31 UTC 2024
    - 64.7K bytes
    - Viewed (0)
  8. android/guava/src/com/google/common/collect/Iterators.java

       * hasNext()} method will return {@code false}.
       *
       * @param iterator the iterator to copy
       * @param type the type of the elements
       * @return a newly-allocated array into which all the elements of the iterator have been copied
       */
      @GwtIncompatible // Array.newInstance(Class, int)
      public static <T extends @Nullable Object> T[] toArray(
    - Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Fri May 03 14:46:32 UTC 2024
    - 50.2K bytes
    - Viewed (0)
  9. guava/src/com/google/common/collect/Iterators.java

       * hasNext()} method will return {@code false}.
       *
       * @param iterator the iterator to copy
       * @param type the type of the elements
       * @return a newly-allocated array into which all the elements of the iterator have been copied
       */
      @GwtIncompatible // Array.newInstance(Class, int)
      public static <T extends @Nullable Object> T[] toArray(
    - Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Fri May 03 14:46:32 UTC 2024
    - 50.2K bytes
    - Viewed (0)
  10. staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go

    // That is, a flow that can not use all the seats that it is allocated
    // for a while.  During that time, the queues that serve that flow
    // advance their `virtualStart` (that is, R(next dispatch in virtual world))
    // more slowly than the other queues (which are using more seats than they
    // are allocated).  The implementation has a hack that addresses part of
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Sep 26 12:55:23 UTC 2023
    - 58.4K bytes
    - Viewed (0)