Results 101 - 110 of 322 for proc (0.04 sec)

  1. src/cmd/link/internal/ld/inittask.go

    	ldr := ctxt.loader
    	if ldr.Lookup("runtime.runtime_inittasks", 0) != 0 {
    		t := ctxt.inittaskSym([]string{"runtime..inittask"}, "go:runtime.inittasks")
    
    		// This slice header is already defined in runtime/proc.go, so we update it here with new contents.
    		sh := ldr.Lookup("runtime.runtime_inittasks", 0)
    		sb := ldr.MakeSymbolUpdater(sh)
    		sb.SetSize(0)
    		sb.SetType(sym.SNOPTRDATA) // Could be SRODATA, but see issue 58857.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jan 30 20:09:45 UTC 2024
    - 6.2K bytes
    - Viewed (0)
  2. src/packaging/deb/init.d/fess

    	fi
    
    	if [ -n "$MAX_OPEN_FILES" ]; then
    		ulimit -n $MAX_OPEN_FILES
    	fi
    
    	if [ -n "$MAX_LOCKED_MEMORY" ]; then
    		ulimit -l $MAX_LOCKED_MEMORY
    	fi
    
    	if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
    		sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
    	fi
    
    	# Start Daemon
    Registered: Wed Jun 12 13:08:18 UTC 2024
    - Last Modified: Sun Jan 15 06:32:15 UTC 2023
    - 5.8K bytes
    - Viewed (0)
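
The init script above raises the open-file and locked-memory limits with `ulimit` and sets `vm.max_map_count` via `sysctl` before starting the daemon. As a rough illustration (not part of Fess), here is a small Go sketch that checks what the `ulimit -n` adjustment looks like from inside a process, using `syscall.Getrlimit` on Linux:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // Query the open-file limit that `ulimit -n` adjusts for the daemon.
        var rl syscall.Rlimit
        if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
            fmt.Println("getrlimit:", err)
            return
        }
        fmt.Printf("open files: soft=%d hard=%d\n", rl.Cur, rl.Max)
    }

The `vm.max_map_count` value the script tunes is exposed by the kernel at /proc/sys/vm/max_map_count and can be read the same way as any other /proc/sys file.
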
  3. platforms/documentation/docs/src/docs/userguide/optimizing-performance/file_system_watching.adoc

    File system watching uses one inotify watch per watched directory.
    You can see the current limit of inotify watches per user by running:
    
    [source,bash]
    ----
    cat /proc/sys/fs/inotify/max_user_watches
    ----
    
    To increase the limit to e.g. 512K watches, run the following:
    
    [source,bash]
    ----
    echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
    ----
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Mon Feb 26 16:37:56 UTC 2024
    - 6.2K bytes
    - Viewed (0)
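
To complement the Gradle documentation excerpt above, here is a hedged Go sketch (not from the Gradle docs) that reads the same `/proc/sys/fs/inotify/max_user_watches` value programmatically, which is useful when a file watcher wants to warn about a low limit:

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    func main() {
        // The same value `cat /proc/sys/fs/inotify/max_user_watches` prints.
        data, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        limit, err := strconv.Atoi(strings.TrimSpace(string(data)))
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        if limit < 524288 {
            fmt.Printf("inotify watch limit is %d; consider raising fs.inotify.max_user_watches\n", limit)
        } else {
            fmt.Printf("inotify watch limit is %d\n", limit)
        }
    }
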
  4. cni/pkg/nodeagent/server.go

    		return nil, fmt.Errorf("error initializing the host rules for health checks: %w", err)
    	}
    
    	podNetns := NewPodNetnsProcFinder(os.DirFS(filepath.Join(pconstants.HostMountsPath, "proc")))
    	netServer := newNetServer(ztunnelServer, podNsMap, iptablesConfigurator, podNetns, set)
    
    	// Set some defaults
    	s := &Server{
    		ctx:        ctx,
    		kubeClient: client,
    		isReady:    ready,
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Fri May 31 21:45:18 UTC 2024
    - 7.2K bytes
    - Viewed (0)
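
The Istio snippet above hands `NewPodNetnsProcFinder` an `fs.FS` rooted at the host's proc mount via `os.DirFS`. As a rough, hypothetical sketch of that pattern (the function name and logic here are illustrative, not Istio's implementation), a finder can walk the numeric PID entries of such a filesystem like this:

    package main

    import (
        "fmt"
        "io/fs"
        "os"
        "strconv"
    )

    // listPIDs returns the numeric process directories found in a /proc-like
    // filesystem. Accepting fs.FS (as the Istio code does with os.DirFS) keeps
    // the function testable against a fake proc tree.
    func listPIDs(procFS fs.FS) ([]int, error) {
        entries, err := fs.ReadDir(procFS, ".")
        if err != nil {
            return nil, err
        }
        var pids []int
        for _, e := range entries {
            if !e.IsDir() {
                continue
            }
            // Only directories with purely numeric names are processes.
            pid, err := strconv.Atoi(e.Name())
            if err != nil {
                continue
            }
            pids = append(pids, pid)
        }
        return pids, nil
    }

    func main() {
        pids, err := listPIDs(os.DirFS("/proc"))
        if err != nil {
            fmt.Println("readdir:", err)
            return
        }
        fmt.Printf("found %d processes\n", len(pids))
    }
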
  5. src/runtime/tracestatus.go

    	// The status should never be bad. Some invariant must have been violated.
    	if status == traceProcBad {
    		print("runtime: pid=", pid, "\n")
    		throw("attempted to trace a bad status for a proc")
    	}
    
    	// Trace the status.
    	w = w.event(traceEvProcStatus, traceArg(pid), traceArg(status))
    
    	// Trace any special ranges that are in-progress.
    	if inSweep {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 15 17:03:35 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  6. pkg/kubelet/prober/scale_test.go

    // that the ephemeral port is not exhausted.
    
    // The default port range on a normal Linux system has 28232 free ephemeral ports per
    // tuple srcIP,srcPort:dstIP:dstPort:Proto: /proc/sys/net/ipv4/ip_local_port_range 32768 60999
    // 1 pods x 600 containers/pod x 1 probes/container x 1 req/sec = 600 req/sec
    // 600 req/sec x 59 sec = 35400
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Aug 16 16:33:01 UTC 2023
    - 8.2K bytes
    - Viewed (0)
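
The comment above budgets probe traffic against the ephemeral port range; for the default range 32768-60999 that is 60999 - 32768 + 1 = 28232 usable ports per destination tuple. A small Go sketch (not part of the Kubernetes test) that reads `/proc/sys/net/ipv4/ip_local_port_range` and does the same arithmetic:

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    func main() {
        // The file holds two numbers: the low and high end of the ephemeral range.
        data, err := os.ReadFile("/proc/sys/net/ipv4/ip_local_port_range")
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        fields := strings.Fields(string(data))
        if len(fields) != 2 {
            fmt.Println("unexpected format:", string(data))
            return
        }
        low, _ := strconv.Atoi(fields[0])
        high, _ := strconv.Atoi(fields[1])
        // e.g. 32768..60999 -> 28232 ports available per srcIP:dstIP:dstPort:proto tuple.
        fmt.Printf("ephemeral ports: %d-%d (%d total)\n", low, high, high-low+1)
    }
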
  7. pkg/volume/nfs/nfs.go

    	*nfs
    }
    
    func (c *nfsUnmounter) TearDown() error {
    	return c.TearDownAt(c.GetPath())
    }
    
    func (c *nfsUnmounter) TearDownAt(dir string) error {
    	// Use extensiveMountPointCheck to consult /proc/mounts. We can't use faster
    	// IsLikelyNotMountPoint (lstat()), since there may be root_squash on the
    	// NFS server and kubelet may not be able to do lstat/stat() there.
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue May 14 06:17:25 UTC 2024
    - 9.5K bytes
    - Viewed (0)
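
The comment in the NFS unmounter explains why it consults /proc/mounts rather than relying on lstat(). As a hedged illustration of that kind of check (a simplified stand-in, not Kubernetes' actual `extensiveMountPointCheck`), a mount-table lookup can scan /proc/mounts directly:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // isMountPoint reports whether dir appears as a mount point in /proc/mounts.
    // Each line has the form: device mountpoint fstype options dump pass.
    // Real code also needs to handle octal escapes such as \040 for spaces.
    func isMountPoint(dir string) (bool, error) {
        f, err := os.Open("/proc/mounts")
        if err != nil {
            return false, err
        }
        defer f.Close()

        scanner := bufio.NewScanner(f)
        for scanner.Scan() {
            fields := strings.Fields(scanner.Text())
            if len(fields) >= 2 && fields[1] == dir {
                return true, nil
            }
        }
        return false, scanner.Err()
    }

    func main() {
        mounted, err := isMountPoint("/proc")
        if err != nil {
            fmt.Println("check:", err)
            return
        }
        fmt.Println("/proc mounted:", mounted)
    }
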
  8. src/crypto/x509/pem_decrypt.go

    	for i := 0; i < pad; i++ {
    		encrypted = append(encrypted, byte(pad))
    	}
    	enc.CryptBlocks(encrypted, encrypted)
    
    	return &pem.Block{
    		Type: blockType,
    		Headers: map[string]string{
    			"Proc-Type": "4,ENCRYPTED",
    			"DEK-Info":  ciph.name + "," + hex.EncodeToString(iv),
    		},
    		Bytes: encrypted,
    	}, nil
    }
    
    func cipherByName(name string) *rfc1423Algo {
    	for i := range rfc1423Algos {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Oct 13 17:09:47 UTC 2023
    - 7.2K bytes
    - Viewed (0)
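
The excerpt shows crypto/x509 building the RFC 1423 `Proc-Type` and `DEK-Info` PEM headers. The exported entry point for this code path is `x509.EncryptPEMBlock`, which is deprecated because RFC 1423 encryption is insecure by modern standards; a minimal usage sketch (block type and password are arbitrary):

    package main

    import (
        "crypto/rand"
        "crypto/x509"
        "encoding/pem"
        "fmt"
    )

    func main() {
        // Encrypt some bytes into a legacy RFC 1423 PEM block. The resulting
        // block carries the Proc-Type and DEK-Info headers assembled in
        // pem_decrypt.go. Deprecated API, shown only for illustration.
        block, err := x509.EncryptPEMBlock(rand.Reader, "MESSAGE", []byte("hello"), []byte("password"), x509.PEMCipherAES256)
        if err != nil {
            fmt.Println("encrypt:", err)
            return
        }
        fmt.Printf("%s", pem.EncodeToMemory(block))
    }
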
  9. src/runtime/traceevent.go

    }
    
    // eventWriter creates a new traceEventWriter. It is the main entrypoint for writing trace events.
    //
    // Before creating the event writer, this method will emit a status for the current goroutine
    // or proc if it exists, and if it hasn't had its status emitted yet. goStatus and procStatus indicate
    // what the status of goroutine or P should be immediately *before* the events that are about to
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 9.2K bytes
    - Viewed (0)
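
The `eventWriter` described above is internal to the runtime's tracer; from user code those goroutine and proc status events are produced indirectly by enabling tracing with the `runtime/trace` package. A minimal sketch (output file name and workload are arbitrary):

    package main

    import (
        "fmt"
        "os"
        "runtime/trace"
    )

    func main() {
        // Write a trace; the runtime's event writers emit goroutine/proc status
        // events into this file, which `go tool trace` can then display.
        f, err := os.Create("trace.out")
        if err != nil {
            fmt.Println("create:", err)
            return
        }
        defer f.Close()

        if err := trace.Start(f); err != nil {
            fmt.Println("trace start:", err)
            return
        }
        defer trace.Stop()

        // Some trivial work so the trace is not empty.
        sum := 0
        for i := 0; i < 1_000_000; i++ {
            sum += i
        }
        fmt.Println("sum:", sum)
    }
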
  10. src/internal/poll/splice_linux.go

    	// the kernel to move in a single call to splice(2).
    	// We use 1MB as Splice writes data through a pipe, and 1MB is the default maximum pipe buffer size,
    	// which is determined by /proc/sys/fs/pipe-max-size.
    	maxSpliceSize = 1 << 20
    )
    
    // Splice transfers at most remain bytes of data from src to dst, using the
    // splice system call to minimize copies of data from and to userspace.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 02 21:49:26 UTC 2024
    - 7.6K bytes
    - Viewed (0)
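
The constant above caps a single splice at 1 MiB (1<<20 bytes) because that is the default maximum pipe buffer size. A hedged sketch, using `golang.org/x/sys/unix` (not imported by the excerpt), that compares a freshly created pipe's buffer size with the system-wide cap in /proc/sys/fs/pipe-max-size:

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "strings"

        "golang.org/x/sys/unix"
    )

    func main() {
        // System-wide maximum a pipe buffer may be grown to (the F_SETPIPE_SZ cap).
        data, err := os.ReadFile("/proc/sys/fs/pipe-max-size")
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        maxSize, _ := strconv.Atoi(strings.TrimSpace(string(data)))

        // Default size of a newly created pipe (typically 64 KiB).
        r, w, err := os.Pipe()
        if err != nil {
            fmt.Println("pipe:", err)
            return
        }
        defer r.Close()
        defer w.Close()

        cur, err := unix.FcntlInt(r.Fd(), unix.F_GETPIPE_SZ, 0)
        if err != nil {
            fmt.Println("fcntl:", err)
            return
        }
        fmt.Printf("pipe buffer: %d bytes, pipe-max-size: %d bytes\n", cur, maxSize)
    }
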