Results 141 - 150 of 302 for Drivers (0.3 sec)
- cmd/erasure-server-pool.go
        if errors.Is(err, errNoHealRequired) {
            countNoHeal++
        }
        r.DiskCount += result.DiskCount
        r.SetCount += result.SetCount
        r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
        r.After.Drives = append(r.After.Drives, result.After.Drives...)
    }

    // No heal returned by all serverPools, return errNoHealRequired
    if countNoHeal == len(z.serverPools) {
        return r, errNoHealRequired
    }
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 89.2K bytes - Viewed (0)
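The aggregation pattern above is worth noting: per-pool counters are merged unconditionally, but the errNoHealRequired sentinel only propagates when every pool reported it. A minimal runnable sketch of the same shape, with hypothetical names (result, healAll) standing in for the MinIO types:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNoHealRequired mirrors the sentinel used in the snippet above.
    var errNoHealRequired = errors.New("no heal required")

    type result struct{ DiskCount, SetCount int }

    // healAll merges every pool's counters and returns the sentinel
    // only when all pools reported it; any other error aborts early.
    func healAll(pools []func() (result, error)) (result, error) {
        var r result
        countNoHeal := 0
        for _, heal := range pools {
            res, err := heal()
            if err != nil && !errors.Is(err, errNoHealRequired) {
                return r, err
            }
            if errors.Is(err, errNoHealRequired) {
                countNoHeal++
            }
            r.DiskCount += res.DiskCount
            r.SetCount += res.SetCount
        }
        if countNoHeal == len(pools) {
            return r, errNoHealRequired
        }
        return r, nil
    }

    func main() {
        healthy := func() (result, error) { return result{DiskCount: 4}, errNoHealRequired }
        r, err := healAll([]func() (result, error){healthy, healthy})
        fmt.Println(r.DiskCount, errors.Is(err, errNoHealRequired)) // 8 true
    }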
- cmd/admin-handlers.go
// if a "restart/stop" was successful or not. Service signal now supports
// a dry-run that helps skip the nodes that may have hung drives. By default
// restart/stop will ignore the servers that are hung on drives. You can use
// 'force' param to force restart even with hung drives if needed.
func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    vars := mux.Vars(r)
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 99.7K bytes - Viewed (0)
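The doc comment describes behavior driven by request parameters: skip hung-drive nodes by default, honor 'force' to override. A hedged sketch of that shape in plain net/http — the handler body is illustrative, and the "dry-run" spelling is an assumption; only the 'force' name comes from the comment:

    package main

    import (
        "fmt"
        "net/http"
    )

    // serviceHandler is a hypothetical sketch: it skips hung-drive
    // nodes by default and only touches them when force is set.
    func serviceHandler(w http.ResponseWriter, r *http.Request) {
        dryRun := r.URL.Query().Get("dry-run") == "true" // assumed param name
        force := r.URL.Query().Get("force") == "true"

        if dryRun {
            fmt.Fprintln(w, "dry-run: would restart, skipping hung drives")
            return
        }
        if force {
            fmt.Fprintln(w, "restarting all nodes, including those hung on drives")
            return
        }
        fmt.Fprintln(w, "restarting nodes, ignoring servers hung on drives")
    }

    func main() {
        http.HandleFunc("/service", serviceHandler)
        http.ListenAndServe(":8080", nil)
    }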
- cmd/admin-heal-ops.go
        if serverDebugLog {
            fmt.Printf("Task in the queue: %#v\n", task)
        }
    case <-h.ctx.Done():
        return nil
    }

    countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
        for _, drive := range drives {
            if drive.State == madmin.DriveStateOk {
                count++
            }
        }
        return count
    }

    // task queued, now wait for the response.
    select {

Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 25.4K bytes - Viewed (0)
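countOKDrives is a plain filter-and-count over madmin.HealDriveInfo. The same logic, sketched with a local stand-in type so it runs without the madmin dependency:

    package main

    import "fmt"

    // driveInfo is a local stand-in for madmin.HealDriveInfo.
    type driveInfo struct{ State string }

    const driveStateOk = "ok" // mirrors madmin.DriveStateOk

    // countOKDrives counts drives reporting the "ok" state.
    func countOKDrives(drives []driveInfo) (count int) {
        for _, d := range drives {
            if d.State == driveStateOk {
                count++
            }
        }
        return count
    }

    func main() {
        drives := []driveInfo{{State: driveStateOk}, {State: "offline"}, {State: driveStateOk}}
        fmt.Println(countOKDrives(drives)) // 2
    }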
- buildscripts/verify-healing.sh
        echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
    done
}

function perform_test() {
    start_port=$2

    start_minio_3_node $start_port

    echo "Testing Distributed Erasure setup healing of drives"
    echo "Remove the contents of the disks belonging to '${1}' node"

    rm -rf ${WORK_DIR}/${1}/*/

    set -x

    start_minio_3_node $start_port

    check_heal ${1}
    rv=$?
    if [ "$rv" == "1" ]; then
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Jul 12 20:51:54 UTC 2024 - 4K bytes - Viewed (0)
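check_heal (not shown) presumably polls the restarted node until healing converges; the general shape is a bounded retry loop. A generic Go sketch of that idea — waitFor and the probe are hypothetical helpers, not part of this script:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitFor polls probe until it reports success or the deadline passes.
    func waitFor(probe func() (bool, error), timeout, interval time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            ok, err := probe()
            if err != nil {
                return err
            }
            if ok {
                return nil
            }
            time.Sleep(interval)
        }
        return errors.New("timed out waiting for heal to finish")
    }

    func main() {
        attempts := 0
        err := waitFor(func() (bool, error) {
            attempts++
            return attempts >= 3, nil // pretend heal converges on the third check
        }, 10*time.Second, 100*time.Millisecond)
        fmt.Println(err)
    }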
- tests/table_test.go
package tests_test

import (
    "regexp"
    "sync"
    "testing"

    "gorm.io/driver/gaussdb"
    "gorm.io/driver/postgres"
    "gorm.io/gorm"
    "gorm.io/gorm/schema"
    "gorm.io/gorm/utils/tests"
    . "gorm.io/gorm/utils/tests"
)

type UserWithTable struct {
    gorm.Model
    Name string
}

func (UserWithTable) TableName() string {
    return "gorm.user"
}

func TestTable(t *testing.T) {
Registered: Sun Dec 28 09:35:17 UTC 2025 - Last Modified: Mon Jul 21 02:46:58 UTC 2025 - 12.7K bytes - Viewed (0)
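The test leans on GORM's Tabler convention: any model implementing TableName() string overrides the derived table name. A minimal sketch using the sqlite driver and a DryRun session to show the generated SQL (model and DSN are illustrative):

    package main

    import (
        "fmt"

        "gorm.io/driver/sqlite"
        "gorm.io/gorm"
    )

    // UserWithTable pins its table name instead of using GORM's
    // default pluralized snake_case ("user_with_tables").
    type UserWithTable struct {
        gorm.Model
        Name string
    }

    func (UserWithTable) TableName() string { return "gorm.user" }

    func main() {
        db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
        if err != nil {
            panic(err)
        }
        // DryRun builds the statement without executing it, so we can
        // inspect the table name the model resolves to.
        stmt := db.Session(&gorm.Session{DryRun: true}).Create(&UserWithTable{Name: "jinzhu"}).Statement
        fmt.Println(stmt.SQL.String()) // INSERT targets "gorm.user"
    }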
- docs/config/README.md
In most setups this is sufficient to heal the content after drive replacements. Setting `max_sleep` to a *lower* value and `max_io` to a *higher* value makes healing go faster.
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 18.1K bytes - Viewed (1)
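Both knobs live in the `heal` config subsystem and can be set through the admin API. A hedged sketch using madmin-go's SetConfigKV — endpoint, credentials, and values are placeholders, and the exact signature may vary across madmin-go versions, so treat this as an assumption to verify:

    package main

    import (
        "context"
        "log"

        "github.com/minio/madmin-go/v3"
    )

    func main() {
        // Placeholders: point these at a real deployment.
        adm, err := madmin.New("localhost:9000", "ACCESS_KEY", "SECRET_KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        // Lower max_sleep and raise max_io to make healing more aggressive.
        // (Assumed helper; confirm against your madmin-go version.)
        restart, err := adm.SetConfigKV(context.Background(), "heal max_sleep=1ms max_io=30")
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("applied; server restart required: %v", restart)
    }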
- api/go1.17.txt
pkg database/sql, method (*NullByte) Scan(interface{}) error
pkg database/sql, method (*NullInt16) Scan(interface{}) error
pkg database/sql, method (NullByte) Value() (driver.Value, error)
pkg database/sql, method (NullInt16) Value() (driver.Value, error)
pkg database/sql, type NullByte struct
pkg database/sql, type NullByte struct, Byte uint8
pkg database/sql, type NullByte struct, Valid bool
pkg database/sql, type NullInt16 struct
Registered: Tue Dec 30 11:13:12 UTC 2025 - Last Modified: Fri Feb 17 20:31:46 UTC 2023 - 18K bytes - Viewed (0)
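These Go 1.17 additions behave like the older NullString family: Scan accepts a driver value or nil, and Valid reports whether a non-NULL value was seen. A small self-contained demonstration, no database required:

    package main

    import (
        "database/sql"
        "fmt"
    )

    func main() {
        var nb sql.NullByte

        // Scanning a concrete value sets Valid = true.
        if err := nb.Scan(int64(7)); err != nil {
            panic(err)
        }
        fmt.Println(nb.Byte, nb.Valid) // 7 true

        // Scanning SQL NULL resets Valid.
        if err := nb.Scan(nil); err != nil {
            panic(err)
        }
        fmt.Println(nb.Byte, nb.Valid) // 0 false

        // Value() round-trips back into a driver.Value (nil when invalid).
        v, _ := nb.Value()
        fmt.Println(v) // <nil>
    }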
- internal/ringbuffer/ring_buffer.go
func (r *RingBuffer) Reset() {
    r.mu.Lock()
    defer r.mu.Unlock()

    // Set error so any readers/writers will return immediately.
    r.setErr(errors.New("reset called"), true)
    if r.block {
        r.readCond.Broadcast()
        r.writeCond.Broadcast()
    }

    // Unlock the mutex so readers/writers can finish.
    r.mu.Unlock()
    r.wg.Wait()
    r.mu.Lock()

    r.r = 0
    r.w = 0
    r.err = nil
    r.isFull = false
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 13.3K bytes - Viewed (0)
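Reset demonstrates a drain pattern: flag an error under the lock so blocked readers and writers bail out, wake them, release the lock while a WaitGroup drains them, then retake the lock to zero the state. A stripped-down sketch of just that sequencing, with a hypothetical type:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // drainable is a hypothetical stand-in showing Reset's locking order.
    type drainable struct {
        mu   sync.Mutex
        cond *sync.Cond
        err  error
        wg   sync.WaitGroup // tracks in-flight readers/writers
        data []byte
    }

    func (d *drainable) Reset() {
        d.mu.Lock()
        defer d.mu.Unlock()

        // 1. Set the error so any blocked operation returns immediately.
        d.err = errors.New("reset called")
        d.cond.Broadcast()

        // 2. Release the lock so those operations can actually finish...
        d.mu.Unlock()
        d.wg.Wait() // ...and wait until every in-flight one has drained.
        d.mu.Lock()

        // 3. With exclusive access again, clear the state.
        d.data = d.data[:0]
        d.err = nil
    }

    func main() {
        d := &drainable{}
        d.cond = sync.NewCond(&d.mu)
        d.Reset()
        fmt.Println("reset complete, err:", d.err)
    }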
- tests/connpool_test.go
package tests_test

import (
    "context"
    "database/sql"
    "os"
    "reflect"
    "testing"

    "gorm.io/driver/mysql"
    "gorm.io/gorm"
    . "gorm.io/gorm/utils/tests"
)

type wrapperTx struct {
    *sql.Tx
    conn *wrapperConnPool
}

func (c *wrapperTx) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
    c.conn.got = append(c.conn.got, query)
    return c.Tx.PrepareContext(ctx, query)
}
Registered: Sun Dec 28 09:35:17 UTC 2025 - Last Modified: Sun May 25 07:40:40 UTC 2025 - 5.5K bytes - Viewed (0)
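wrapperTx is a decorator: it satisfies the same prepare interface as *sql.Tx but records each query before delegating, so the test can assert on the SQL actually issued. The shape, reduced to a self-contained sketch with hypothetical names:

    package main

    import (
        "context"
        "fmt"
    )

    // preparer is the single method the wrapper intercepts; the real
    // equivalent is satisfied by *sql.Tx and *sql.DB.
    type preparer interface {
        PrepareContext(ctx context.Context, query string) (string, error)
    }

    // fakeConn stands in for a real connection in this sketch.
    type fakeConn struct{}

    func (fakeConn) PrepareContext(_ context.Context, query string) (string, error) {
        return "stmt:" + query, nil
    }

    // recordingConn decorates a preparer, appending every query it sees
    // so a test can assert on the SQL that was actually prepared.
    type recordingConn struct {
        inner preparer
        got   []string
    }

    func (c *recordingConn) PrepareContext(ctx context.Context, query string) (string, error) {
        c.got = append(c.got, query)
        return c.inner.PrepareContext(ctx, query)
    }

    func main() {
        conn := &recordingConn{inner: fakeConn{}}
        conn.PrepareContext(context.Background(), "SELECT * FROM users")
        fmt.Println(conn.got) // [SELECT * FROM users]
    }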
- cmd/erasure-healing_test.go
    }

    // Check the state of the object in the first disk (should be missing)
    if hr.Before.Drives[0].State != madmin.DriveStateMissing {
        t.Fatalf("Unexpected drive state: %v", hr.Before.Drives[0].State)
    }

    // Check the state of all other disks (should be ok)
    for i, h := range append(hr.Before.Drives[1:], hr.After.Drives...) {
        if h.State != madmin.DriveStateOk {
            t.Fatalf("Unexpected drive state (%d): %v", i+1, h.State)
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 48.5K bytes - Viewed (0)