zpool online, offline and clear devices

parent 44a53fa2e4
commit 441e099de9

zpool.c | 16
@@ -496,4 +496,20 @@ nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path
    // idata.scan = 0;

    return zpool_search_import(zfsh, &idata);
}

int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy) {
    nvlist_t *policy = NULL;
    int ret = 0;

    if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
        nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
        return (1);

    if (zpool_clear(pool->zph, device, policy) != 0)
        ret = 1;

    nvlist_free(policy);

    return (ret);
}

zpool.h | 13
@@ -5,6 +5,15 @@
#ifndef SERVERWARE_ZPOOL_H
#define SERVERWARE_ZPOOL_H

/* Rewind request information */
#define ZPOOL_NO_REWIND 1 /* No policy - default behavior */
#define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */
#define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */
#define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */
#define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */
#define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */
#define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */

struct zpool_list {
    zpool_handle_t *zph;
    void *pnext;

@@ -70,6 +79,10 @@ nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv);

nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan);

__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags);
int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force);
int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy);

extern char *sZPOOL_CONFIG_VERSION;
extern char *sZPOOL_CONFIG_POOL_NAME;

New file (C):

@@ -0,0 +1,37 @@
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include <sys/fs/zfs.h>

#include "common.h"
#include "zpool.h"

__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags) {
    vdev_state_t newstate = VDEV_STATE_UNKNOWN;
    zpool_vdev_online(pool->zph, path, flags, &newstate);
    return newstate;
}

int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force) {
    int ret = 0;
    // if (force) {
    //     uint64_t guid = zpool_vdev_path_to_guid(pool->zph, path);
    //     vdev_aux_t aux;
    //     if (istmp == B_FALSE) {
    //         /* Force the fault to persist across imports */
    //         aux = VDEV_AUX_EXTERNAL_PERSIST;
    //     } else {
    //         aux = VDEV_AUX_EXTERNAL;
    //     }
    //
    //     if (guid == 0 || zpool_vdev_fault(pool->zph, guid, aux) != 0)
    //         ret = 1;
    // } else {
    if (zpool_vdev_offline(pool->zph, path, istmp) != 0)
        ret = 1;
    // }
    return ret;
}

New file (Go):

@@ -0,0 +1,143 @@
package zfs

// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
    "fmt"
    "unsafe"
)

// Online tries to set the given devices online.
// expand - expand storage
func (pool *Pool) Online(expand bool, devs ...string) (err error) {
    cflags := C.int(0)
    if expand {
        cflags = C.ZFS_ONLINE_EXPAND
    }
    for _, dev := range devs {
        csdev := C.CString(dev)
        var newstate VDevState
        if newstate = VDevState(C.set_zpool_vdev_online(pool.list, csdev, cflags)); newstate != VDevStateUnknown {
            if newstate != VDevStateHealthy {
                err = fmt.Errorf(
                    "Device '%s' onlined, but remains in faulted state",
                    dev)
            }
        } else {
            err = LastError()
        }
        C.free(unsafe.Pointer(csdev))
    }
    return
}

// Offline takes the given devices offline.
func (pool *Pool) Offline(force bool, devs ...string) (err error) {
    return pool.offline(false, force, devs...)
}

// OfflineTemp takes the given devices offline temporarily;
// upon reboot, the specified physical device reverts to its previous state.
// force - Force the device into a faulted state.
func (pool *Pool) OfflineTemp(force bool, devs ...string) (err error) {
    return pool.offline(true, force, devs...)
}

// temp - Upon reboot, the specified physical device reverts to its previous state.
// force - Force the device into a faulted state.
func (pool *Pool) offline(temp, force bool, devs ...string) (err error) {
    for _, dev := range devs {
        csdev := C.CString(dev)
        var newstate VDevState
        if newstate = VDevState(C.set_zpool_vdev_offline(pool.list, csdev, booleanT(temp), booleanT(force))); newstate != VDevStateUnknown {
            if newstate != VDevStateHealthy {
                err = fmt.Errorf(
                    "Device '%s' offlined, but remains in faulted state",
                    dev)
            }
        } else {
            err = LastError()
        }
        C.free(unsafe.Pointer(csdev))
    }
    return
}

// Clear clears all errors associated with a pool or a particular device.
func (pool *Pool) Clear(device string) (err error) {
    csdev := C.CString(device)
    if len(device) == 0 {
        csdev = nil
    }
    if sc := C.do_zpool_clear(pool.list, csdev, C.ZPOOL_NO_REWIND); sc != 0 {
        err = fmt.Errorf("Pool clear failed")
    }
    return
}

// Attach test
// func (pool *Pool) attach(props PoolProperties, devs ...string) (err error) {
//     cprops := toCPoolProperties(props)
//     if cprops != nil {
//         defer C.nvlist_free(cprops)
//     } else {
//         return fmt.Errorf("Out of memory [Pool Attach properties]")
//     }
//     cdevs := C.alloc_cstrings(C.int(len(devs)))
//     if cdevs != nil {
//         defer C.free(unsafe.Pointer(cdevs))
//     } else {
//         return fmt.Errorf("Out of memory [Pool Attach args]")
//     }
//     for i, dp := range devs {
//         tmp := C.CString(dp)
//         if tmp != nil {
//             defer C.free(unsafe.Pointer(tmp))
//         } else {
//             return fmt.Errorf("Out of memory [Pool Attach dev]")
//         }
//         C.strings_setat(cdevs, C.int(i), tmp)
//     }
//     // vroot := C.make_root_vdev(pool.list.zph, cprops, 0, 0, 0, 0, len(devs), cdevs)
//     var nvroot *C.struct_nvlist
//     if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 {
//         err = errors.New("Failed to allocate root vdev")
//         return
//     }
//     csTypeRoot := C.CString(string(VDevTypeRoot))
//     r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE,
//         csTypeRoot)
//     C.free(unsafe.Pointer(csTypeRoot))
//     if r != 0 {
//         err = errors.New("Failed to allocate root vdev")
//         return
//     }
//     defer C.nvlist_free(nvroot)

//     // Now we need to build specs (vdev hierarchy)
//     if err = buildVDevTree(nvroot, VDevTypeRoot, vdev.Devices, vdev.Spares, vdev.L2Cache, props); err != nil {
//         return
//     }

//     return
// }

// func (pool *Pool) AttachForce(devs ...string) (err error) {
//     return
// }

// func (pool *Pool) Detach(devs ...string) (err error) {
//     return
// }

// func (pool *Pool) DetachForce(devs ...string) (err error) {
//     return
// }

// func (pool *Pool) Replace(devs ...string) (err error) {
//     return
// }
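
Usage sketch: the methods introduced by this commit (Online, Offline/OfflineTemp and Clear) could be driven roughly as below. This is only an illustration, not part of the commit; PoolOpen, Close, the import path, and the pool and device names are assumptions about the surrounding go-libzfs library and the local system.

package main

import (
    "fmt"

    zfs "github.com/bicomsystems/go-libzfs" // assumed import path
)

func main() {
    // Hypothetical pool and device names; adjust to the local system.
    pool, err := zfs.PoolOpen("tank")
    if err != nil {
        fmt.Println("open failed:", err)
        return
    }
    defer pool.Close()

    disk := "/dev/disk/by-id/ata-EXAMPLE-DISK"

    // Take the disk offline until the next reboot (temporary, not forced).
    if err := pool.OfflineTemp(false, disk); err != nil {
        fmt.Println("offline:", err)
    }

    // Bring it back online; expand=true maps to ZFS_ONLINE_EXPAND.
    if err := pool.Online(true, disk); err != nil {
        fmt.Println("online:", err)
    }

    // Clear error counters; an empty device string makes Clear pass a NULL
    // device to do_zpool_clear, so the whole pool is cleared.
    if err := pool.Clear(""); err != nil {
        fmt.Println("clear:", err)
    }
}

Note that Clear always requests ZPOOL_NO_REWIND in this commit; the other rewind policy bits defined in zpool.h are only reachable through the C helper do_zpool_clear.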