Merge branch 'sw-3.1' into dev

Faruk Kasumovic 2018-06-29 13:18:55 +02:00
commit 1830efcb43
18 changed files with 3319 additions and 934 deletions

.gitignore vendored

@ -1 +1,3 @@
.gitconfig
*.sublime-*
go-libzfs.test

LICENSE

@ -1,19 +1,19 @@
Copyright (c) 2015, Faruk Kasumovic
Copyright (c) 2015, Faruk Kasumovic
All rights reserved.
Redistribution and use in source and binary forms, with or without
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
* Neither the name of go-libzfs nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@ -25,4 +25,3 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

README.md

@ -2,7 +2,7 @@
**go-libzfs** currently implements basic manipulation of ZFS pools and datasets. The plan is to add more in further development, improve the documentation with more examples, and add more tests. _go-libzfs_ uses the libzfs C library directly and does not wrap the OpenZFS CLI tools, which ensures the best performance; in my personal opinion it is also the more reliable approach, since libzfs is less subject to change than the CLI tools. The goal is to make it easy to use and manipulate OpenZFS from within Go, mapping the libzfs C library into a Go-style package that respects common golang practice.
[![GoDoc](https://godoc.org/github.com/fkasumovic/go-libzfs?status.svg)](https://godoc.org/github.com/fkasumovic/go-libzfs)
[![GoDoc](https://godoc.org/github.com/bicomsystems/go-libzfs?status.svg)](https://godoc.org/github.com/bicomsystems/go-libzfs)
## Main features
@ -21,14 +21,14 @@
## Installing
```sh
go get github.com/fkasumovic/go-libzfs
go get github.com/bicomsystems/go-libzfs
```
## Testing
```sh
# On command line shell run
cd $GOPATH/src/github.com/fkasumovic/go-libzfs
cd $GOPATH/src/github.com/bicomsystems/go-libzfs
go test
```
@ -46,10 +46,10 @@ props := make(map[ZFSProp]Property)
// Size is passed as a string, i.e. the numeric value converted to a base-10 string.
strSize := "1073741824"
props[ZFSPropVolsize] = Property{Value: strSize}
props[DatasetPropVolsize] = Property{Value: strSize}
// In addition I explicitly choose some more properties to be set.
props[ZFSPropVolblocksize] = Property{Value: "4096"}
props[ZFSPropReservation] = Property{Value: strSize}
props[DatasetPropVolblocksize] = Property{Value: "4096"}
props[DatasetPropReservation] = Property{Value: strSize}
// Let's create the desired volume
d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props)
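For readers new to the API, a self-contained variant of the snippet above, updated for the renamed DatasetProp* constants, might look like the sketch below. The pool name TESTPOOL is assumed to exist already, and error handling is kept minimal.

```go
package main

import (
	"fmt"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	// Property values are passed as strings; here 1 GiB as a base-10 string.
	strSize := "1073741824"
	props := map[zfs.Prop]zfs.Property{
		zfs.DatasetPropVolsize:      {Value: strSize},
		zfs.DatasetPropVolblocksize: {Value: "4096"},
		zfs.DatasetPropReservation:  {Value: strSize},
	}

	// Create the volume; DatasetCreate now returns an open handle, so close it.
	d, err := zfs.DatasetCreate("TESTPOOL/VOLUME1", zfs.DatasetTypeVolume, props)
	if err != nil {
		fmt.Println("DatasetCreate failed:", err)
		return
	}
	defer d.Close()
	fmt.Println("volume created")
}
```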

a_test.go Normal file

@ -0,0 +1,34 @@
package zfs_test
import (
"testing"
)
/* ------------------------------------------------------------------------- */
// TESTS ARE INTERDEPENDENT AND MUST RUN IN THE ORDER LISTED BELOW
func Test(t *testing.T) {
zpoolTestPoolCreate(t)
zpoolTestPoolVDevTree(t)
zpoolTestExport(t)
zpoolTestPoolImportSearch(t)
zpoolTestImport(t)
zpoolTestExportForce(t)
zpoolTestImportByGUID(t)
zpoolTestPoolProp(t)
zpoolTestPoolStatusAndState(t)
zpoolTestPoolOpenAll(t)
zpoolTestFailPoolOpen(t)
zfsTestDatasetCreate(t)
zfsTestDatasetOpen(t)
zfsTestDatasetSnapshot(t)
zfsTestDatasetOpenAll(t)
zfsTestDatasetSetProperty(t)
zfsTestDatasetDestroy(t)
zpoolTestPoolDestroy(t)
cleanupVDisks()
}

common.c Normal file

@ -0,0 +1,56 @@
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "common.h"
libzfs_handle_ptr libzfsHandle;
int go_libzfs_init() {
libzfsHandle = libzfs_init();
return 0;
}
int libzfs_last_error() {
return libzfs_errno(libzfsHandle);
}
const char *libzfs_last_error_str() {
return libzfs_error_description(libzfsHandle);
}
int libzfs_clear_last_error() {
zfs_standard_error(libzfsHandle, EZFS_SUCCESS, "success");
return 0;
}
property_list_t *new_property_list() {
property_list_t *r = malloc(sizeof(property_list_t));
memset(r, 0, sizeof(property_list_t));
return r;
}
void free_properties(property_list_t *root) {
if (root != 0) {
property_list_t *tmp = 0;
do {
tmp = root->pnext;
free(root);
root = tmp;
} while(tmp);
}
}
nvlist_ptr new_property_nvlist() {
nvlist_ptr props = NULL;
int r = nvlist_alloc(&props, NV_UNIQUE_NAME, 0);
if ( r != 0 ) {
return NULL;
}
return props;
}
int property_nvlist_add(nvlist_ptr list, const char *prop, const char *value) {
return nvlist_add_string(list, prop, value);
}

common.go

@ -1,4 +1,4 @@
// Implements basic manipulation of ZFS pools and data sets.
// Package zfs implements basic manipulation of ZFS pools and data sets.
// It uses the libzfs C library instead of the CLI zfs tools, with the goal
// of making it easy to use and manipulate OpenZFS from within a Go project.
//
@ -14,6 +14,7 @@ package zfs
#include <stdlib.h>
#include <libzfs.h>
#include "common.h"
#include "zpool.h"
#include "zfs.h"
*/
@ -21,64 +22,78 @@ import "C"
import (
"errors"
"sync"
)
// VDevType type of device in the pool
type VDevType string
var libzfs_handle *C.struct_libzfs_handle
func init() {
libzfs_handle = C.libzfs_init()
C.go_libzfs_init()
return
}
// Types of Virtual Devices
const (
VDevTypeRoot VDevType = "root"
VDevTypeMirror = "mirror"
VDevTypeReplacing = "replacing"
VDevTypeRaidz = "raidz"
VDevTypeDisk = "disk"
VDevTypeFile = "file"
VDevTypeMissing = "missing"
VDevTypeHole = "hole"
VDevTypeSpare = "spare"
VDevTypeLog = "log"
VDevTypeL2cache = "l2cache"
VDevTypeRoot VDevType = "root" // VDevTypeRoot root device in ZFS pool
VDevTypeMirror = "mirror" // VDevTypeMirror mirror device in ZFS pool
VDevTypeReplacing = "replacing" // VDevTypeReplacing replacing
VDevTypeRaidz = "raidz" // VDevTypeRaidz RAIDZ device
VDevTypeDisk = "disk" // VDevTypeDisk device is disk
VDevTypeFile = "file" // VDevTypeFile device is file
VDevTypeMissing = "missing" // VDevTypeMissing missing device
VDevTypeHole = "hole" // VDevTypeHole hole
VDevTypeSpare = "spare" // VDevTypeSpare spare device
VDevTypeLog = "log" // VDevTypeLog ZIL device
VDevTypeL2cache = "l2cache" // VDevTypeL2cache cache device (disk)
)
type PoolProp int
type ZFSProp int
// Prop type to enumerate all different properties supported by ZFS
type Prop int
// PoolStatus type representing status of the pool
type PoolStatus int
// PoolState type representing pool state
type PoolState uint64
// Zfs pool or dataset property
// VDevState - vdev states type
type VDevState uint64
// VDevAux - vdev aux states
type VDevAux uint64
// Property ZFS pool or dataset property value
type Property struct {
Value string
Source string
}
var Global struct {
Mtx sync.Mutex
}
// Pool status
const (
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
*/
PoolStatusCorrupt_cache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */
PoolStatusMissing_dev_r /* missing device with replicas */
PoolStatusMissing_dev_nr /* missing device with no replicas */
PoolStatusCorrupt_label_r /* bad device label with replicas */
PoolStatusCorrupt_label_nr /* bad device label with no replicas */
PoolStatusBad_guid_sum /* sum of device guids didn't match */
PoolStatusCorrupt_pool /* pool metadata is corrupted */
PoolStatusCorrupt_data /* data errors in user (meta)data */
PoolStatusFailing_dev /* device experiencing errors */
PoolStatusVersion_newer /* newer on-disk version */
PoolStatusHostid_mismatch /* last accessed by another system */
PoolStatusIo_failure_wait /* failed I/O, failmode 'wait' */
PoolStatusIo_failure_continue /* failed I/O, failmode 'continue' */
PoolStatusBad_log /* cannot read log chain(s) */
PoolStatusErrata /* informational errata available */
PoolStatusCorruptCache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */
PoolStatusMissingDevR /* missing device with replicas */
PoolStatusMissingDevNr /* missing device with no replicas */
PoolStatusCorruptLabelR /* bad device label with replicas */
PoolStatusCorruptLabelNr /* bad device label with no replicas */
PoolStatusBadGUIDSum /* sum of device guids didn't match */
PoolStatusCorruptPool /* pool metadata is corrupted */
PoolStatusCorruptData /* data errors in user (meta)data */
PoolStatusFailingDev /* device experiencing errors */
PoolStatusVersionNewer /* newer on-disk version */
PoolStatusHostidMismatch /* last accessed by another system */
PoolStatusIoFailureWait /* failed I/O, failmode 'wait' */
PoolStatusIoFailureContinue /* failed I/O, failmode 'continue' */
PoolStatusBadLog /* cannot read log chain(s) */
PoolStatusErrata /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
@ -86,27 +101,27 @@ const (
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
PoolStatusUnsup_feat_read /* unsupported features for read */
PoolStatusUnsup_feat_write /* unsupported features for write */
PoolStatusUnsupFeatRead /* unsupported features for read */
PoolStatusUnsupFeatWrite /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
PoolStatusFaulted_dev_r /* faulted device with replicas */
PoolStatusFaulted_dev_nr /* faulted device with no replicas */
PoolStatusFaultedDevR /* faulted device with replicas */
PoolStatusFaultedDevNr /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
PoolStatusVersion_older /* older legacy on-disk version */
PoolStatusFeat_disabled /* supported features are disabled */
PoolStatusResilvering /* device being resilvered */
PoolStatusOffline_dev /* device online */
PoolStatusRemoved_dev /* removed device */
PoolStatusVersionOlder /* older legacy on-disk version */
PoolStatusFeatDisabled /* supported features are disabled */
PoolStatusResilvering /* device being resilvered */
PoolStatusOfflineDev /* device online */
PoolStatusRemovedDev /* removed device */
/*
* Finally, the following indicates a healthy pool.
@ -129,12 +144,12 @@ const (
// Pool properties. Enumerates available ZFS pool properties. Use it to access
// pool properties either to read or set a specific property.
const (
PoolPropName PoolProp = iota
PoolPropName Prop = iota
PoolPropSize
PoolPropCapacity
PoolPropAltroot
PoolPropHealth
PoolPropGuid
PoolPropGUID
PoolPropVersion
PoolPropBootfs
PoolPropDelegation
@ -152,6 +167,10 @@ const (
PoolPropComment
PoolPropExpandsz
PoolPropFreeing
PoolPropFragmentaion
PoolPropLeaked
PoolPropMaxBlockSize
PoolPropTName
PoolNumProps
)
@ -162,101 +181,212 @@ const (
* the property table in module/zcommon/zfs_prop.c.
*/
const (
ZFSPropType ZFSProp = iota
ZFSPropCreation
ZFSPropUsed
ZFSPropAvailable
ZFSPropReferenced
ZFSPropCompressratio
ZFSPropMounted
ZFSPropOrigin
ZFSPropQuota
ZFSPropReservation
ZFSPropVolsize
ZFSPropVolblocksize
ZFSPropRecordsize
ZFSPropMountpoint
ZFSPropSharenfs
ZFSPropChecksum
ZFSPropCompression
ZFSPropAtime
ZFSPropDevices
ZFSPropExec
ZFSPropSetuid
ZFSPropReadonly
ZFSPropZoned
ZFSPropSnapdir
ZFSPropPrivate /* not exposed to user, temporary */
ZFSPropAclinherit
ZFSPropCreatetxg /* not exposed to the user */
ZFSPropName /* not exposed to the user */
ZFSPropCanmount
ZFSPropIscsioptions /* not exposed to the user */
ZFSPropXattr
ZFSPropNumclones /* not exposed to the user */
ZFSPropCopies
ZFSPropVersion
ZFSPropUtf8only
ZFSPropNormalize
ZFSPropCase
ZFSPropVscan
ZFSPropNbmand
ZFSPropSharesmb
ZFSPropRefquota
ZFSPropRefreservation
ZFSPropGuid
ZFSPropPrimarycache
ZFSPropSecondarycache
ZFSPropUsedsnap
ZFSPropUsedds
ZFSPropUsedchild
ZFSPropUsedrefreserv
ZFSPropUseraccounting /* not exposed to the user */
ZFSPropStmf_shareinfo /* not exposed to the user */
ZFSPropDefer_destroy
ZFSPropUserrefs
ZFSPropLogbias
ZFSPropUnique /* not exposed to the user */
ZFSPropObjsetid /* not exposed to the user */
ZFSPropDedup
ZFSPropMlslabel
ZFSPropSync
ZFSPropRefratio
ZFSPropWritten
ZFSPropClones
ZFSPropLogicalused
ZFSPropLogicalreferenced
ZFSPropInconsistent /* not exposed to the user */
ZFSPropSnapdev
ZFSPropAcltype
ZFSPropSelinux_context
ZFSPropSelinux_fscontext
ZFSPropSelinux_defcontext
ZFSPropSelinux_rootcontext
ZFSPropRelatime
ZFSPropRedundant_metadata
ZFSNumProps
DatasetPropType Prop = iota
DatasetPropCreation
DatasetPropUsed
DatasetPropAvailable
DatasetPropReferenced
DatasetPropCompressratio
DatasetPropMounted
DatasetPropOrigin
DatasetPropQuota
DatasetPropReservation
DatasetPropVolsize
DatasetPropVolblocksize
DatasetPropRecordsize
DatasetPropMountpoint
DatasetPropSharenfs
DatasetPropChecksum
DatasetPropCompression
DatasetPropAtime
DatasetPropDevices
DatasetPropExec
DatasetPropSetuid
DatasetPropReadonly
DatasetPropZoned
DatasetPropSnapdir
DatasetPropPrivate /* not exposed to user, temporary */
DatasetPropAclinherit
DatasetPropCreatetxg /* not exposed to the user */
DatasetPropName /* not exposed to the user */
DatasetPropCanmount
DatasetPropIscsioptions /* not exposed to the user */
DatasetPropXattr
DatasetPropNumclones /* not exposed to the user */
DatasetPropCopies
DatasetPropVersion
DatasetPropUtf8only
DatasetPropNormalize
DatasetPropCase
DatasetPropVscan
DatasetPropNbmand
DatasetPropSharesmb
DatasetPropRefquota
DatasetPropRefreservation
DatasetPropGUID
DatasetPropPrimarycache
DatasetPropSecondarycache
DatasetPropUsedsnap
DatasetPropUsedds
DatasetPropUsedchild
DatasetPropUsedrefreserv
DatasetPropUseraccounting /* not exposed to the user */
DatasetPropStmfShareinfo /* not exposed to the user */
DatasetPropDeferDestroy
DatasetPropUserrefs
DatasetPropLogbias
DatasetPropUnique /* not exposed to the user */
DatasetPropObjsetid /* not exposed to the user */
DatasetPropDedup
DatasetPropMlslabel
DatasetPropSync
DatasetPropRefratio
DatasetPropWritten
DatasetPropClones
DatasetPropLogicalused
DatasetPropLogicalreferenced
DatasetPropInconsistent /* not exposed to the user */
DatasetPropFilesystemLimit
DatasetPropSnapshotLimit
DatasetPropFilesystemCount
DatasetPropSnapshotCount
DatasetPropSnapdev
DatasetPropAcltype
DatasetPropSelinuxContext
DatasetPropSelinuxFsContext
DatasetPropSelinuxDefContext
DatasetPropSelinuxRootContext
DatasetPropRelatime
DatasetPropRedundantMetadata
DatasetPropOverlay
DatasetNumProps
)
// Get last underlying libzfs error description if any
// LastError returns the description of the last underlying libzfs error, if any
func LastError() (err error) {
errno := C.libzfs_errno(libzfs_handle)
if errno == 0 {
return nil
}
return errors.New(C.GoString(C.libzfs_error_description(libzfs_handle)))
return errors.New(C.GoString(C.libzfs_last_error_str()))
}
// Force clear of any last error set by undeliying libzfs
// ClearLastError force clears any last error set by the underlying libzfs
func ClearLastError() (err error) {
err = LastError()
C.clear_last_error(libzfs_handle)
C.libzfs_clear_last_error()
return
}
func boolean_t(b bool) (r C.boolean_t) {
func booleanT(b bool) (r C.boolean_t) {
if b {
return 1
}
return 0
}
// ZFS errors
const (
ESuccess = 0 /* no error -- success */
ENomem = 2000 << iota /* out of memory */
EBadprop /* invalid property value */
EPropreadonly /* cannot set readonly property */
EProptype /* property does not apply to dataset type */
EPropnoninherit /* property is not inheritable */
EPropspace /* bad quota or reservation */
EBadtype /* dataset is not of appropriate type */
EBusy /* pool or dataset is busy */
EExists /* pool or dataset already exists */
ENoent /* no such pool or dataset */
EBadstream /* bad backup stream */
EDsreadonly /* dataset is readonly */
EVoltoobig /* volume is too large for 32-bit system */
EInvalidname /* invalid dataset name */
EBadrestore /* unable to restore to destination */
EBadbackup /* backup failed */
EBadtarget /* bad attach/detach/replace target */
ENodevice /* no such device in pool */
EBaddev /* invalid device to add */
ENoreplicas /* no valid replicas */
EResilvering /* currently resilvering */
EBadversion /* unsupported version */
EPoolunavail /* pool is currently unavailable */
EDevoverflow /* too many devices in one vdev */
EBadpath /* must be an absolute path */
ECrosstarget /* rename or clone across pool or dataset */
EZoned /* used improperly in local zone */
EMountfailed /* failed to mount dataset */
EUmountfailed /* failed to unmount dataset */
EUnsharenfsfailed /* unshare(1M) failed */
ESharenfsfailed /* share(1M) failed */
EPerm /* permission denied */
ENospc /* out of space */
EFault /* bad address */
EIo /* I/O error */
EIntr /* signal received */
EIsspare /* device is a hot spare */
EInvalconfig /* invalid vdev configuration */
ERecursive /* recursive dependency */
ENohistory /* no history object */
EPoolprops /* couldn't retrieve pool props */
EPoolNotsup /* ops not supported for this type of pool */
EPoolInvalarg /* invalid argument for this pool operation */
ENametoolong /* dataset name is too long */
EOpenfailed /* open of device failed */
ENocap /* couldn't get capacity */
ELabelfailed /* write of label failed */
EBadwho /* invalid permission who */
EBadperm /* invalid permission */
EBadpermset /* invalid permission set name */
ENodelegation /* delegated administration is disabled */
EUnsharesmbfailed /* failed to unshare over smb */
ESharesmbfailed /* failed to share over smb */
EBadcache /* bad cache file */
EIsl2CACHE /* device is for the level 2 ARC */
EVdevnotsup /* unsupported vdev type */
ENotsup /* ops not supported on this dataset */
EActiveSpare /* pool has active shared spare devices */
EUnplayedLogs /* log device has unplayed logs */
EReftagRele /* snapshot release: tag not found */
EReftagHold /* snapshot hold: tag already exists */
ETagtoolong /* snapshot hold/rele: tag too long */
EPipefailed /* pipe create failed */
EThreadcreatefailed /* thread create failed */
EPostsplitOnline /* onlining a disk after splitting it */
EScrubbing /* currently scrubbing */
ENoScrub /* no active scrub */
EDiff /* general failure of zfs diff */
EDiffdata /* bad zfs diff data */
EPoolreadonly /* pool is in read-only mode */
EUnknown
)
// vdev states are ordered from least to most healthy.
// A vdev that's VDevStateCantOpen or below is considered unusable.
const (
VDevStateUnknown VDevState = iota // Uninitialized vdev
VDevStateClosed // Not currently open
VDevStateOffline // Not allowed to open
VDevStateRemoved // Explicitly removed from system
VDevStateCantOpen // Tried to open, but failed
VDevStateFaulted // External request to fault device
VDevStateDegraded // Replicated vdev with unhealthy kids
VDevStateHealthy // Presumed good
)
// vdev aux states. When a vdev is in the VDevStateCantOpen state, the aux field
// of the vdev stats structure uses these constants to distinguish why.
const (
VDevAuxNone VDevAux = iota // no error
VDevAuxOpenFailed // ldi_open_*() or vn_open() failed
VDevAuxCorruptData // bad label or disk contents
VDevAuxNoReplicas // insufficient number of replicas
VDevAuxBadGUIDSum // vdev guid sum doesn't match
VDevAuxTooSmall // vdev size is too small
VDevAuxBadLabel // the label is OK but invalid
VDevAuxVersionNewer // on-disk version is too new
VDevAuxVersionOlder // on-disk version is too old
VDevAuxUnsupFeat // unsupported features
VDevAuxSpared // hot spare used in another pool
VDevAuxErrExceeded // too many errors
VDevAuxIOFailure // experienced I/O failure
VDevAuxBadLog // cannot read log chain(s)
VDevAuxExternal // external diagnosis
VDevAuxSplitPool // vdev was split off into another pool
)
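The numeric error constants above mirror the EZFS_* codes from libzfs; in practice callers rarely compare against them and instead read the recorded message through LastError() or ClearLastError(). A minimal sketch of that pattern, with a made-up dataset name:

```go
package main

import (
	"fmt"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	// Opening a dataset that does not exist fails inside libzfs; the Go
	// wrapper surfaces the description recorded on the shared C handle.
	d, err := zfs.DatasetOpen("TESTPOOL/NO_SUCH_DATASET")
	if err != nil {
		fmt.Println("open failed:", err)
	} else {
		d.Close()
	}

	// ClearLastError returns whatever error is still pending and resets it.
	if err := zfs.ClearLastError(); err != nil {
		fmt.Println("pending libzfs error was:", err)
	}
}
```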

common.h Normal file

@ -0,0 +1,37 @@
/* C wrappers around some libzfs calls (and C in general) intended to simplify
* using libzfs from Go and make the Go code shorter and more readable.
*/
#define INT_MAX_NAME 256
#define INT_MAX_VALUE 1024
#define ZAP_OLDMAXVALUELEN 1024
#define ZFS_MAX_DATASET_NAME_LEN 256
typedef struct property_list {
char value[INT_MAX_VALUE];
char source[ZFS_MAX_DATASET_NAME_LEN];
int property;
void *pnext;
} property_list_t;
typedef struct libzfs_handle* libzfs_handle_ptr;
typedef struct nvlist* nvlist_ptr;
typedef struct property_list *property_list_ptr;
typedef struct nvpair* nvpair_ptr;
typedef struct vdev_stat* vdev_stat_ptr;
typedef char* char_ptr;
extern libzfs_handle_ptr libzfsHandle;
int go_libzfs_init();
int libzfs_last_error();
const char *libzfs_last_error_str();
int libzfs_clear_last_error();
property_list_t *new_property_list();
void free_properties(property_list_t *root);
nvlist_ptr new_property_nvlist();
int property_nvlist_add(nvlist_ptr ptr, const char* prop, const char *value);

sendrecv.go Normal file

@ -0,0 +1,269 @@
package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
"fmt"
"os"
"path"
"strings"
"syscall"
"unsafe"
)
type SendFlags struct {
Verbose bool
Replicate bool
DoAll bool
FromOrigin bool
Dedup bool
Props bool
DryRun bool
// Parsable bool
// Progress bool
LargeBlock bool
EmbedData bool
// Compress bool
}
type RecvFlags struct {
Verbose bool
IsPrefix bool
IsTail bool
DryRun bool
Force bool
CanmountOff bool
Resumable bool
ByteSwap bool
NoMount bool
}
func to_boolean_t(a bool) C.boolean_t {
if a {
return 1
}
return 0
}
func to_sendflags_t(flags *SendFlags) (cflags *C.sendflags_t) {
cflags = C.alloc_sendflags()
cflags.verbose = to_boolean_t(flags.Verbose)
cflags.replicate = to_boolean_t(flags.Replicate)
cflags.doall = to_boolean_t(flags.DoAll)
cflags.fromorigin = to_boolean_t(flags.FromOrigin)
cflags.dedup = to_boolean_t(flags.Dedup)
cflags.props = to_boolean_t(flags.Props)
cflags.dryrun = to_boolean_t(flags.DryRun)
// cflags.parsable = to_boolean_t(flags.Parsable)
// cflags.progress = to_boolean_t(flags.Progress)
cflags.largeblock = to_boolean_t(flags.LargeBlock)
cflags.embed_data = to_boolean_t(flags.EmbedData)
// cflags.compress = to_boolean_t(flags.Compress)
return
}
func to_recvflags_t(flags *RecvFlags) (cflags *C.recvflags_t) {
cflags = C.alloc_recvflags()
cflags.verbose = to_boolean_t(flags.Verbose)
cflags.isprefix = to_boolean_t(flags.IsPrefix)
cflags.istail = to_boolean_t(flags.IsTail)
cflags.dryrun = to_boolean_t(flags.DryRun)
cflags.force = to_boolean_t(flags.Force)
cflags.canmountoff = to_boolean_t(flags.CanmountOff)
// cflags.resumable = to_boolean_t(flags.Resumable)
cflags.byteswap = to_boolean_t(flags.ByteSwap)
cflags.nomount = to_boolean_t(flags.NoMount)
return
}
func (d *Dataset) send(FromName string, outf *os.File, flags *SendFlags) (err error) {
var cfromname, ctoname *C.char
var dpath string
var pd Dataset
if d.Type != DatasetTypeSnapshot || (len(FromName) > 0 && strings.Contains(FromName, "#")) {
err = fmt.Errorf(
"Unsupported method on filesystem or bookmark. Use func SendOne() for that purpose.")
return
}
cflags := to_sendflags_t(flags)
defer C.free(unsafe.Pointer(cflags))
if dpath, err = d.Path(); err != nil {
return
}
if len(FromName) > 0 {
if FromName[0] == '#' || FromName[0] == '@' {
FromName = dpath + FromName
}
cfromname = C.CString(FromName)
defer C.free(unsafe.Pointer(cfromname))
}
sendparams := strings.Split(dpath, "@")
parent := sendparams[0]
ctoname = C.CString(sendparams[1])
defer C.free(unsafe.Pointer(ctoname))
if pd, err = DatasetOpen(parent); err != nil {
return
}
defer pd.Close()
cerr := C.zfs_send(pd.list.zh, cfromname, ctoname, cflags, C.int(outf.Fd()), nil, nil, nil)
if cerr != 0 {
err = LastError()
}
return
}
func (d *Dataset) SendOne(FromName string, outf *os.File, flags *SendFlags) (err error) {
var cfromname, ctoname *C.char
var dpath string
var lzc_send_flags uint32
if d.Type == DatasetTypeSnapshot || (len(FromName) > 0 && !strings.Contains(FromName, "#")) {
err = fmt.Errorf(
"Unsupported with snapshot. Use func Send() for that purpose.")
return
}
if flags.Replicate || flags.DoAll || flags.Props || flags.Dedup || flags.DryRun {
err = fmt.Errorf("Unsupported flag with filesystem or bookmark.")
return
}
if flags.LargeBlock {
lzc_send_flags |= C.LZC_SEND_FLAG_LARGE_BLOCK
}
if flags.EmbedData {
lzc_send_flags |= C.LZC_SEND_FLAG_EMBED_DATA
}
// if (flags.Compress)
// lzc_send_flags |= LZC_SEND_FLAG_COMPRESS;
if dpath, err = d.Path(); err != nil {
return
}
if len(FromName) > 0 {
if FromName[0] == '#' || FromName[0] == '@' {
FromName = dpath + FromName
}
cfromname = C.CString(FromName)
defer C.free(unsafe.Pointer(cfromname))
}
ctoname = C.CString(path.Base(dpath))
defer C.free(unsafe.Pointer(ctoname))
cerr := C.zfs_send_one(d.list.zh, cfromname, C.int(outf.Fd()), lzc_send_flags)
if cerr != 0 {
err = LastError()
}
return
}
func (d *Dataset) Send(outf *os.File, flags SendFlags) (err error) {
if flags.Replicate {
flags.DoAll = true
}
err = d.send("", outf, &flags)
return
}
func (d *Dataset) SendFrom(FromName string, outf *os.File, flags SendFlags) (err error) {
var porigin Property
var from, dest []string
if err = d.ReloadProperties(); err != nil {
return
}
porigin, _ = d.GetProperty(DatasetPropOrigin)
if len(porigin.Value) > 0 && porigin.Value == FromName {
FromName = ""
flags.FromOrigin = true
} else {
var dpath string
if dpath, err = d.Path(); err != nil {
return
}
dest = strings.Split(dpath, "@")
from = strings.Split(FromName, "@")
if len(from[0]) > 0 && from[0] != dest[0] {
err = fmt.Errorf("Incremental source must be in same filesystem.")
return
}
if len(from) < 2 || strings.Contains(from[1], "@") || strings.Contains(from[1], "/") {
err = fmt.Errorf("Invalid incremental source.")
return
}
}
err = d.send(from[1], outf, &flags)
return
}
func (d *Dataset) SendSize(FromName string, flags SendFlags) (size uint64, err error) {
var porigin Property
var from Dataset
var dpath string
if dpath, err = d.Path(); err != nil {
return
}
zc := C.new_zfs_cmd()
defer C.free(unsafe.Pointer(zc))
dpath = strings.Split(dpath, "@")[0]
if len(FromName) > 0 {
if FromName[0] == '#' || FromName[0] == '@' {
FromName = dpath + FromName
}
porigin, _ = d.GetProperty(DatasetPropOrigin)
if len(porigin.Value) > 0 && porigin.Value == FromName {
FromName = ""
flags.FromOrigin = true
}
if from, err = DatasetOpen(FromName); err != nil {
return
}
zc.zc_fromobj = C.zfs_prop_get_int(from.list.zh, C.ZFS_PROP_OBJSETID)
from.Close()
} else {
zc.zc_fromobj = 0
}
zc.zc_obj = C.uint64_t(to_boolean_t(flags.FromOrigin))
zc.zc_sendobj = C.zfs_prop_get_int(d.list.zh, C.ZFS_PROP_OBJSETID)
zc.zc_guid = 1
zc.zc_flags = 0
if flags.LargeBlock {
zc.zc_flags |= C.LZC_SEND_FLAG_LARGE_BLOCK
}
if flags.EmbedData {
zc.zc_flags |= C.LZC_SEND_FLAG_EMBED_DATA
}
// C.estimate_ioctl(d.list.zhp, prevsnap_obj, to_boolean_t(flags.FromOrigin), lzc_send_flags, unsafe.Pointer(&size))
if ec, e := C.estimate_send_size(zc); ec != 0 {
err = fmt.Errorf("Failed to estimate send size. %s %d", e.Error(), e.(syscall.Errno))
}
size = uint64(zc.zc_objset_type)
return
}
func (d *Dataset) Receive(inf *os.File, flags RecvFlags) (err error) {
var dpath string
if dpath, err = d.Path(); err != nil {
return
}
props := C.new_property_nvlist()
if props == nil {
err = fmt.Errorf("Out of memory func (d *Dataset) Recv()")
return
}
defer C.nvlist_free(props)
cflags := to_recvflags_t(&flags)
defer C.free(unsafe.Pointer(cflags))
dest := C.CString(dpath)
defer C.free(unsafe.Pointer(dest))
ec := C.zfs_receive(C.libzfsHandle, dest, cflags, C.int(inf.Fd()), nil)
if ec != 0 {
err = fmt.Errorf("ZFS receive of %s failed. %s", C.GoString(dest), LastError().Error())
}
return
}
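Put together, SendFlags, RecvFlags and the methods above drive zfs send/receive through plain *os.File handles. Below is a rough sketch of a full-stream send into a file followed by a receive; the pool, snapshot and file names are hypothetical, and note that Receive() takes its destination from the path of the dataset it is called on, so that dataset must already be open.

```go
package main

import (
	"fmt"
	"os"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	// Open the snapshot to send (assumed to exist).
	snap, err := zfs.DatasetOpen("TESTPOOL/DATASET@backup")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer snap.Close()

	// Optionally estimate the stream size first.
	if size, err := snap.SendSize("", zfs.SendFlags{}); err == nil {
		fmt.Println("estimated stream size:", size)
	}

	// Write the full stream into a file.
	outf, err := os.Create("/tmp/dataset-backup.zfs")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer outf.Close()
	if err = snap.Send(outf, zfs.SendFlags{}); err != nil {
		fmt.Println("send failed:", err)
		return
	}

	// Receive the stream into an existing destination dataset.
	inf, err := os.Open("/tmp/dataset-backup.zfs")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer inf.Close()
	dst, err := zfs.DatasetOpen("BACKUPPOOL/RESTORE")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer dst.Close()
	// Force maps to the force flag of recvflags_t (roughly zfs recv -F).
	if err = dst.Receive(inf, zfs.RecvFlags{Force: true}); err != nil {
		fmt.Println("receive failed:", err)
	}
}
```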

zfs.c

@ -1,12 +1,13 @@
/* C wrappers around some libzfs calls (and C in general) intended to simplify
* using libzfs from Go and make the Go code shorter and more readable.
*/
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "common.h"
#include "zpool.h"
#include "zfs.h"
@ -18,8 +19,23 @@ dataset_list_t *create_dataset_list_item() {
}
void dataset_list_close(dataset_list_t *list) {
zfs_close(list->zh);
free(list);
if (list != NULL) {
if (list->zh != NULL) {
zfs_close(list->zh);
list->zh = NULL;
}
free(list);
}
// dataset_list_free(list);
}
void dataset_list_free(dataset_list_t *list) {
dataset_list_t *next;
while(list) {
next = list->pnext;
free(list);
list = next;
}
}
int dataset_list_callb(zfs_handle_t *dataset, void *data) {
@ -36,61 +52,210 @@ int dataset_list_callb(zfs_handle_t *dataset, void *data) {
return 0;
}
int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first) {
dataset_list_ptr dataset_list_root() {
int err = 0;
dataset_list_t *zlist = create_dataset_list_item();
err = zfs_iter_root(libzfs, dataset_list_callb, &zlist);
if ( zlist->zh ) {
*first = zlist;
} else {
*first = 0;
free(zlist);
err = zfs_iter_root(libzfsHandle, dataset_list_callb, &zlist);
if ( err != 0 || zlist->zh == NULL) {
dataset_list_free(zlist);
return NULL;
}
return err;
return zlist;
}
dataset_list_t *dataset_next(dataset_list_t *dataset) {
dataset_list_ptr dataset_next(dataset_list_t *dataset) {
return dataset->pnext;
}
int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first) {
int err = 0;
dataset_list_t *zlist = create_dataset_list_item();
err = zfs_iter_children(zfs, dataset_list_callb, &zlist);
if ( zlist->zh ) {
*first = zlist;
} else {
*first = 0;
free(zlist);
}
return err;
int dataset_type(dataset_list_ptr dataset) {
return zfs_get_type(dataset->zh);
}
int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) {
dataset_list_ptr dataset_open(const char *path) {
dataset_list_ptr list = create_dataset_list_item();
list->zh = zfs_open(libzfsHandle, path, 0xF);
if (list->zh == NULL) {
dataset_list_free(list);
list = NULL;
}
return list;
}
int dataset_create(const char *path, zfs_type_t type, nvlist_ptr props) {
return zfs_create(libzfsHandle, path, type, props);
}
int dataset_destroy(dataset_list_ptr dataset, boolean_t defer) {
return zfs_destroy(dataset->zh, defer);
}
dataset_list_t *dataset_list_children(dataset_list_t *dataset) {
int err = 0;
dataset_list_t *zlist = create_dataset_list_item();
err = zfs_iter_children(dataset->zh, dataset_list_callb, &zlist);
if ( err != 0 || zlist->zh == NULL) {
dataset_list_free(zlist);
return NULL;
}
return zlist;
}
zpool_list_ptr dataset_get_pool(dataset_list_ptr dataset) {
zpool_list_ptr pool = create_zpool_list_item();
if(pool != NULL) {
pool->zph = zfs_get_pool_handle(dataset->zh);
}
return pool;
}
int dataset_prop_set(dataset_list_ptr dataset, zfs_prop_t prop, const char *value) {
return zfs_prop_set(dataset->zh, zfs_prop_to_name(prop), value);
}
int dataset_user_prop_set(dataset_list_ptr dataset, const char *prop, const char *value) {
return zfs_prop_set(dataset->zh, prop, value);
}
int dataset_clone(dataset_list_ptr dataset, const char *target, nvlist_ptr props) {
return zfs_clone(dataset->zh, target, props);
}
int dataset_snapshot(const char *path, boolean_t recur, nvlist_ptr props) {
return zfs_snapshot(libzfsHandle, path, recur, props);
}
int dataset_rollback(dataset_list_ptr dataset, dataset_list_ptr snapshot, boolean_t force) {
return zfs_rollback(dataset->zh, snapshot->zh, force);
}
int dataset_promote(dataset_list_ptr dataset) {
return zfs_promote(dataset->zh);
}
int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t recur, boolean_t force_unm) {
return zfs_rename(dataset->zh, new_name, recur, force_unm);
}
const char *dataset_is_mounted(dataset_list_ptr dataset){
char *mp = NULL;
// zfs_is_mounted returns B_TRUE or B_FALSE
if (0 != zfs_is_mounted(dataset->zh, &mp)) {
return mp;
}
return NULL;
}
int dataset_mount(dataset_list_ptr dataset, const char *options, int flags) {
if ( 0 < strlen(options)) {
return zfs_mount(dataset->zh, options, flags);
} else {
return zfs_mount(dataset->zh, NULL, flags);
}
}
int dataset_unmount(dataset_list_ptr dataset, int flags) {
return zfs_unmount(dataset->zh, NULL, flags);
}
int dataset_unmountall(dataset_list_ptr dataset, int flags) {
return zfs_unmountall(dataset->zh, flags);
}
const char *dataset_get_name(dataset_list_ptr ds) {
return zfs_get_name(ds->zh);
}
//int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) {
property_list_t *read_dataset_property(dataset_list_t *dataset, int prop) {
int r = 0;
zprop_source_t source;
char statbuf[INT_MAX_VALUE];
property_list_ptr list = NULL;
list = new_property_list();
r = zfs_prop_get(zh, prop,
r = zfs_prop_get(dataset->zh, prop,
list->value, INT_MAX_VALUE, &source, statbuf, INT_MAX_VALUE, 1);
if (r == 0) {
if (r == 0 && list != NULL) {
// strcpy(list->name, zpool_prop_to_name(prop));
zprop_source_tostr(list->source, source);
list->property = (int)prop;
} else if (list != NULL) {
free_properties(list);
list = NULL;
}
list->property = (int)prop;
return r;
return list;
}
int clear_last_error(libzfs_handle_t *hdl) {
zfs_standard_error(hdl, EZFS_SUCCESS, "success");
return 0;
// int read_user_property(zfs_handle_t *zh, property_list_t *list, const char *prop) {
property_list_t *read_user_property(dataset_list_t *dataset, const char* prop) {
nvlist_t *user_props = zfs_get_user_props(dataset->zh);
nvlist_t *propval;
zprop_source_t sourcetype;
char *strval;
char *sourceval;
// char source[ZFS_MAX_DATASET_NAME_LEN];
property_list_ptr list = new_property_list();
if (nvlist_lookup_nvlist(user_props,
prop, &propval) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strncpy(list->source,
"none", sizeof (list->source));
strval = "-";
} else {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
verify(nvlist_lookup_string(propval,
ZPROP_SOURCE, &sourceval) == 0);
if (strcmp(sourceval,
zfs_get_name(dataset->zh)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
(void) strncpy(list->source,
"local", sizeof (list->source));
} else if (strcmp(sourceval,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
(void) strncpy(list->source,
"received", sizeof (list->source));
} else {
sourcetype = ZPROP_SRC_INHERITED;
(void) strncpy(list->source,
sourceval, sizeof (list->source));
}
}
(void) strncpy(list->value,
strval, sizeof (list->value));
return list;
}
char** alloc_strings(int size) {
char** alloc_cstrings(int size) {
return malloc(size*sizeof(char*));
}
void strings_setat(char **a, int at, char *v) {
a[at] = v;
}
sendflags_t *alloc_sendflags() {
sendflags_t *r = malloc(sizeof(sendflags_t));
memset(r, 0, sizeof(sendflags_t));
return r;
}
recvflags_t *alloc_recvflags() {
recvflags_t *r = malloc(sizeof(recvflags_t));
memset(r, 0, sizeof(recvflags_t));
return r;
}
struct zfs_cmd *new_zfs_cmd(){
struct zfs_cmd *cmd = malloc(sizeof(struct zfs_cmd));
memset(cmd, 0, sizeof(struct zfs_cmd));
return cmd;
}
int estimate_send_size(struct zfs_cmd *zc) {
return zfs_ioctl(libzfsHandle, ZFS_IOC_SEND, zc);
}

zfs.go

@ -2,54 +2,64 @@ package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
"errors"
"fmt"
"strings"
"unsafe"
)
const (
msgDatasetIsNil = "Dataset handle not initialized or its closed"
)
// DatasetProperties type is map of dataset or volume properties prop -> value
type DatasetProperties map[Prop]string
// DatasetType defines enum of dataset types
type DatasetType int32
const (
// DatasetTypeFilesystem - file system dataset
DatasetTypeFilesystem DatasetType = (1 << 0)
DatasetTypeSnapshot = (1 << 1)
DatasetTypeVolume = (1 << 2)
DatasetTypePool = (1 << 3)
DatasetTypeBookmark = (1 << 4)
// DatasetTypeSnapshot - snapshot of dataset
DatasetTypeSnapshot = (1 << 1)
// DatasetTypeVolume - volume (virtual block device) dataset
DatasetTypeVolume = (1 << 2)
// DatasetTypePool - pool dataset
DatasetTypePool = (1 << 3)
// DatasetTypeBookmark - bookmark dataset
DatasetTypeBookmark = (1 << 4)
)
// Dataset - ZFS dataset object
type Dataset struct {
list *C.dataset_list_t
list C.dataset_list_ptr
Type DatasetType
Properties map[ZFSProp]Property
Properties map[Prop]Property
Children []Dataset
}
func (d *Dataset) openChildren() (err error) {
var dataset Dataset
d.Children = make([]Dataset, 0, 5)
errcode := C.dataset_list_children(d.list.zh, &(dataset.list))
for dataset.list != nil {
dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh))
dataset.Properties = make(map[ZFSProp]Property)
list := C.dataset_list_children(d.list)
for list != nil {
dataset := Dataset{list: list}
dataset.Type = DatasetType(C.dataset_type(d.list))
dataset.Properties = make(map[Prop]Property)
err = dataset.ReloadProperties()
if err != nil {
return
}
d.Children = append(d.Children, dataset)
dataset.list = C.dataset_next(dataset.list)
list = C.dataset_next(list)
}
if errcode != 0 {
err = LastError()
return
}
for ci, _ := range d.Children {
for ci := range d.Children {
if err = d.Children[ci].openChildren(); err != nil {
return
}
@ -57,13 +67,13 @@ func (d *Dataset) openChildren() (err error) {
return
}
// Recursive get handles to all available datasets on system
// DatasetOpenAll recursively gets handles to all available datasets on the system
// (file-systems, volumes or snapshots).
func DatasetOpenAll() (datasets []Dataset, err error) {
var dataset Dataset
errcode := C.dataset_list_root(libzfs_handle, &dataset.list)
dataset.list = C.dataset_list_root()
for dataset.list != nil {
dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh))
dataset.Type = DatasetType(C.dataset_type(dataset.list))
err = dataset.ReloadProperties()
if err != nil {
return
@ -71,11 +81,7 @@ func DatasetOpenAll() (datasets []Dataset, err error) {
datasets = append(datasets, dataset)
dataset.list = C.dataset_next(dataset.list)
}
if errcode != 0 {
err = LastError()
return
}
for ci, _ := range datasets {
for ci := range datasets {
if err = datasets[ci].openChildren(); err != nil {
return
}
@ -83,24 +89,30 @@ func DatasetOpenAll() (datasets []Dataset, err error) {
return
}
// Close all datasets in slice and all of its recursive children datasets
// DatasetCloseAll close all datasets in slice and all of its recursive
// children datasets
func DatasetCloseAll(datasets []Dataset) {
for _, d := range datasets {
d.Close()
}
}
// Open dataset and all of its recursive children datasets
// DatasetOpen open dataset and all of its recursive children datasets
func DatasetOpen(path string) (d Dataset, err error) {
d.list = C.create_dataset_list_item()
d.list.zh = C.zfs_open(libzfs_handle, C.CString(path), 0xF)
csPath := C.CString(path)
d.list = C.dataset_open(csPath)
C.free(unsafe.Pointer(csPath))
if d.list.zh == nil {
if d.list == nil || d.list.zh == nil {
err = LastError()
if err == nil {
err = fmt.Errorf("dataset not found.")
}
err = fmt.Errorf("%s - %s", err.Error(), path)
return
}
d.Type = DatasetType(C.zfs_get_type(d.list.zh))
d.Properties = make(map[ZFSProp]Property)
d.Type = DatasetType(C.dataset_type(d.list))
d.Properties = make(map[Prop]Property)
err = d.ReloadProperties()
if err != nil {
return
@ -109,18 +121,19 @@ func DatasetOpen(path string) (d Dataset, err error) {
return
}
func datasetPropertiesTo_nvlist(props map[ZFSProp]Property) (
cprops *C.nvlist_t, err error) {
func datasetPropertiesTonvlist(props map[Prop]Property) (
cprops C.nvlist_ptr, err error) {
// convert properties to nvlist C type
r := C.nvlist_alloc(&cprops, C.NV_UNIQUE_NAME, 0)
if r != 0 {
cprops = C.new_property_nvlist()
if cprops == nil {
err = errors.New("Failed to allocate properties")
return
}
for prop, value := range props {
r := C.nvlist_add_string(
cprops, C.zfs_prop_to_name(
C.zfs_prop_t(prop)), C.CString(value.Value))
csValue := C.CString(value.Value)
r := C.property_nvlist_add(
cprops, C.zfs_prop_to_name(C.zfs_prop_t(prop)), csValue)
C.free(unsafe.Pointer(csValue))
if r != 0 {
err = errors.New("Failed to convert property")
return
@ -129,36 +142,57 @@ func datasetPropertiesTo_nvlist(props map[ZFSProp]Property) (
return
}
// Create a new filesystem or volume on path representing pool/dataset or pool/parent/dataset
// DatasetCreate create a new filesystem or volume on path representing
// pool/dataset or pool/parent/dataset
func DatasetCreate(path string, dtype DatasetType,
props map[ZFSProp]Property) (d Dataset, err error) {
var cprops *C.nvlist_t
if cprops, err = datasetPropertiesTo_nvlist(props); err != nil {
props map[Prop]Property) (d Dataset, err error) {
var cprops C.nvlist_ptr
if cprops, err = datasetPropertiesTonvlist(props); err != nil {
return
}
defer C.nvlist_free(cprops)
errcode := C.zfs_create(libzfs_handle, C.CString(path),
C.zfs_type_t(dtype), cprops)
csPath := C.CString(path)
errcode := C.dataset_create(csPath, C.zfs_type_t(dtype), cprops)
C.free(unsafe.Pointer(csPath))
if errcode != 0 {
err = LastError()
return
}
return
return DatasetOpen(path)
}
// Close dataset and all its recursive children datasets (close handle and cleanup dataset object/s from memory)
// Close close dataset and all its recursive children datasets (close handle
// and cleanup dataset object/s from memory)
func (d *Dataset) Close() {
if d.list != nil && d.list.zh != nil {
C.dataset_list_close(d.list)
}
// path, _ := d.Path()
C.dataset_list_close(d.list)
d.list = nil
for _, cd := range d.Children {
cd.Close()
}
}
// Destroy destroys the dataset. The caller must make sure that the filesystem
// isn't mounted, and that there are no active dependents. Set Defer argument
// to true to defer destruction for when dataset is not in use. Call Close() to
// cleanup memory.
func (d *Dataset) Destroy(Defer bool) (err error) {
if len(d.Children) > 0 {
path, e := d.Path()
if e != nil {
return
}
dsType, e := d.GetProperty(DatasetPropType)
if e != nil {
dsType.Value = err.Error() // just put error (why it didn't fetch property type)
}
err = errors.New("Cannot destroy dataset " + path +
": " + dsType.Value + " has children")
return
}
if d.list != nil {
if ec := C.zfs_destroy(d.list.zh, boolean_t(Defer)); ec != 0 {
if ec := C.dataset_destroy(d.list, booleanT(Defer)); ec != 0 {
err = LastError()
}
} else {
@ -167,14 +201,64 @@ func (d *Dataset) Destroy(Defer bool) (err error) {
return
}
// DestroyRecursive recursively destroy children of dataset and dataset.
func (d *Dataset) DestroyRecursive() (err error) {
var path string
if path, err = d.Path(); err != nil {
return
}
if !strings.Contains(path, "@") { // not snapshot
if len(d.Children) > 0 {
for _, c := range d.Children {
if err = c.DestroyRecursive(); err != nil {
return
}
// close handle to destroyed child dataset
c.Close()
}
// clear closed children array
d.Children = make([]Dataset, 0)
}
err = d.Destroy(false)
} else {
var parent Dataset
tmp := strings.Split(path, "@")
ppath, snapname := tmp[0], tmp[1]
if parent, err = DatasetOpen(ppath); err != nil {
return
}
defer parent.Close()
if len(parent.Children) > 0 {
for _, c := range parent.Children {
if path, err = c.Path(); err != nil {
return
}
if strings.Contains(path, "@") {
continue // skip other snapshots
}
if c, err = DatasetOpen(path + "@" + snapname); err != nil {
continue
}
if err = c.DestroyRecursive(); err != nil {
c.Close()
return
}
c.Close()
}
}
err = d.Destroy(false)
}
return
}
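As the comments above spell out, Destroy() refuses a dataset that still has children, while DestroyRecursive() removes the children it knows about first and then the dataset itself. A small sketch, with a hypothetical dataset path:

```go
package main

import (
	"fmt"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer d.Close()

	// DestroyRecursive walks d.Children first and then destroys d itself;
	// for a childless dataset Destroy(false) alone would do (pass true to
	// defer destruction while the dataset is still in use).
	if err = d.DestroyRecursive(); err != nil {
		fmt.Println("destroy failed:", err)
	}
}
```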
// Pool returns pool dataset belongs to
func (d *Dataset) Pool() (p Pool, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
p.list = C.create_zpool_list_item()
p.list.zph = C.zfs_get_pool_handle(d.list.zh)
if p.list != nil {
p.list = C.dataset_get_pool(d.list)
if p.list != nil && p.list.zph != nil {
err = p.ReloadProperties()
return
}
@ -182,76 +266,116 @@ func (d *Dataset) Pool() (p Pool, err error) {
return
}
// ReloadProperties re-read dataset's properties
func (d *Dataset) ReloadProperties() (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
var plist *C.property_list_t
plist = C.new_property_list()
defer C.free_properties(plist)
d.Properties = make(map[ZFSProp]Property)
for prop := ZFSPropType; prop < ZFSNumProps; prop++ {
errcode := C.read_dataset_property(d.list.zh, plist, C.int(prop))
if errcode != 0 {
d.Properties = make(map[Prop]Property)
Global.Mtx.Lock()
defer Global.Mtx.Unlock()
for prop := DatasetPropType; prop < DatasetNumProps; prop++ {
plist := C.read_dataset_property(d.list, C.int(prop))
if plist == nil {
continue
}
d.Properties[prop] = Property{Value: C.GoString(&(*plist).value[0]),
Source: C.GoString(&(*plist).source[0])}
C.free_properties(plist)
}
return
}
// Reload and return single specified property. This also reloads requested
// GetProperty reload and return single specified property. This also reloads requested
// property in Properties map.
func (d *Dataset) GetProperty(p ZFSProp) (prop Property, err error) {
func (d *Dataset) GetProperty(p Prop) (prop Property, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
var plist *C.property_list_t
plist = C.new_property_list()
defer C.free_properties(plist)
errcode := C.read_dataset_property(d.list.zh, plist, C.int(p))
if errcode != 0 {
plist := C.read_dataset_property(d.list, C.int(p))
if plist == nil {
err = LastError()
return
}
defer C.free_properties(plist)
prop = Property{Value: C.GoString(&(*plist).value[0]),
Source: C.GoString(&(*plist).source[0])}
d.Properties[p] = prop
return
}
// Set ZFS dataset property to value. Not all properties can be set,
// some can be set only at creation time and some are read only.
// Always check if returned error and its description.
func (d *Dataset) SetProperty(p ZFSProp, value string) (err error) {
func (d *Dataset) GetUserProperty(p string) (prop Property, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
errcode := C.zfs_prop_set(d.list.zh, C.zfs_prop_to_name(
C.zfs_prop_t(p)), C.CString(value))
csp := C.CString(p)
defer C.free(unsafe.Pointer(csp))
plist := C.read_user_property(d.list, csp)
if plist == nil {
err = LastError()
return
}
defer C.free_properties(plist)
prop = Property{Value: C.GoString(&(*plist).value[0]),
Source: C.GoString(&(*plist).source[0])}
return
}
// SetProperty set ZFS dataset property to value. Not all properties can be set,
// some can be set only at creation time and some are read only.
// Always check the returned error and its description.
func (d *Dataset) SetProperty(p Prop, value string) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
csValue := C.CString(value)
errcode := C.dataset_prop_set(d.list, C.zfs_prop_t(p), csValue)
C.free(unsafe.Pointer(csValue))
if errcode != 0 {
err = LastError()
}
// Update Properties member with change made
if _, err = d.GetProperty(p); err != nil {
return
}
return
}
func (d *Dataset) SetUserProperty(prop, value string) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
csValue := C.CString(value)
csProp := C.CString(prop)
errcode := C.dataset_user_prop_set(d.list, csProp, csValue)
C.free(unsafe.Pointer(csValue))
C.free(unsafe.Pointer(csProp))
if errcode != 0 {
err = LastError()
}
return
}
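A short sketch of the setters and getters above, on a hypothetical dataset, using the built-in compression property and a user property:

```go
package main

import (
	"fmt"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer d.Close()

	// Built-in property: always check the returned error, since not every
	// property can be changed after creation.
	if err = d.SetProperty(zfs.DatasetPropCompression, "lz4"); err != nil {
		fmt.Println("set compression:", err)
	}
	if p, err := d.GetProperty(zfs.DatasetPropCompression); err == nil {
		fmt.Println("compression =", p.Value, "source =", p.Source)
	}

	// User (custom) property in the usual module:name form.
	if err = d.SetUserProperty("go-libzfs:note", "example"); err != nil {
		fmt.Println("set user property:", err)
	}
	if p, err := d.GetUserProperty("go-libzfs:note"); err == nil {
		fmt.Println("go-libzfs:note =", p.Value)
	}
}
```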
// Clones the dataset. The target must be of the same type as
// Clone - clones the dataset. The target must be of the same type as
// the source.
func (d *Dataset) Clone(target string, props map[ZFSProp]Property) (rd Dataset, err error) {
var cprops *C.nvlist_t
func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err error) {
var cprops C.nvlist_ptr
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if cprops, err = datasetPropertiesTo_nvlist(props); err != nil {
if cprops, err = datasetPropertiesTonvlist(props); err != nil {
return
}
defer C.nvlist_free(cprops)
if errc := C.zfs_clone(d.list.zh, C.CString(target), cprops); errc != 0 {
csTarget := C.CString(target)
defer C.free(unsafe.Pointer(csTarget))
if errc := C.dataset_clone(d.list, csTarget, cprops); errc != 0 {
err = LastError()
return
}
@ -259,14 +383,16 @@ func (d *Dataset) Clone(target string, props map[ZFSProp]Property) (rd Dataset,
return
}
// Create dataset snapshot
func DatasetSnapshot(path string, recur bool, props map[ZFSProp]Property) (rd Dataset, err error) {
var cprops *C.nvlist_t
if cprops, err = datasetPropertiesTo_nvlist(props); err != nil {
// DatasetSnapshot create dataset snapshot. Set recur to true to snapshot child datasets.
func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Dataset, err error) {
var cprops C.nvlist_ptr
if cprops, err = datasetPropertiesTonvlist(props); err != nil {
return
}
defer C.nvlist_free(cprops)
if errc := C.zfs_snapshot(libzfs_handle, C.CString(path), boolean_t(recur), cprops); errc != 0 {
csPath := C.CString(path)
defer C.free(unsafe.Pointer(csPath))
if errc := C.dataset_snapshot(csPath, booleanT(recur), cprops); errc != 0 {
err = LastError()
return
}
@ -274,56 +400,77 @@ func DatasetSnapshot(path string, recur bool, props map[ZFSProp]Property) (rd Da
return
}
// Return zfs dataset path/name
// Path return zfs dataset path/name
func (d *Dataset) Path() (path string, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
name := C.zfs_get_name(d.list.zh)
name := C.dataset_get_name(d.list)
path = C.GoString(name)
return
}
// Rollback rolls the dataset back to the given snapshot
func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if errc := C.zfs_rollback(d.list.zh,
snap.list.zh, boolean_t(force)); errc != 0 {
if errc := C.dataset_rollback(d.list, snap.list, booleanT(force)); errc != 0 {
err = LastError()
return
}
d.ReloadProperties()
return
}
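DatasetSnapshot() and Rollback() combine naturally: take a snapshot, and later reopen it to roll the dataset back. A sketch with hypothetical names:

```go
package main

import (
	"fmt"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	// Take a plain (non-recursive) snapshot with no extra properties.
	props := make(map[zfs.Prop]zfs.Property)
	snap, err := zfs.DatasetSnapshot("TESTPOOL/DATASET@before-change", false, props)
	if err != nil {
		fmt.Println("snapshot:", err)
		return
	}
	snap.Close()

	// Later: open the dataset and the snapshot, then roll back.
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer d.Close()
	s, err := zfs.DatasetOpen("TESTPOOL/DATASET@before-change")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer s.Close()
	// The second argument is the force flag handed through to zfs_rollback.
	if err = d.Rollback(&s, false); err != nil {
		fmt.Println("rollback:", err)
	}
}
```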
func (d *Dataset) Rename(newname string, recur,
force_umount bool) (err error) {
// Promote promotes dataset clone
func (d *Dataset) Promote() (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if errc := C.zfs_rename(d.list.zh, C.CString(newname),
boolean_t(recur), boolean_t(force_umount)); errc != 0 {
if errc := C.dataset_promote(d.list); errc != 0 {
err = LastError()
return
}
d.ReloadProperties()
return
}
// Checks to see if the mount is active. If the filesystem is mounted, fills
// in 'where' with the current mountpoint, and returns true. Otherwise,
// Rename dataset
func (d *Dataset) Rename(newName string, recur,
forceUnmount bool) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
csNewName := C.CString(newName)
defer C.free(unsafe.Pointer(csNewName))
if errc := C.dataset_rename(d.list, csNewName,
booleanT(recur), booleanT(forceUnmount)); errc != 0 {
err = LastError()
return
}
d.ReloadProperties()
return
}
// IsMounted checks to see if the mount is active. If the filesystem is mounted,
// sets the 'where' return value to the current mountpoint and returns true. Otherwise,
// returns false.
func (d *Dataset) IsMounted() (mounted bool, where string) {
var cw *C.char
if d.list == nil {
return false, ""
return
}
m := C.zfs_is_mounted(d.list.zh, &cw)
defer C.free_cstring(cw)
if m != 0 {
return true, C.GoString(cw)
mp := C.dataset_is_mounted(d.list)
// defer C.free(mp)
if mounted = (mp != nil); mounted {
where = C.GoString(mp)
C.free(unsafe.Pointer(mp))
}
return false, ""
return
}
// Mount the given filesystem.
@ -332,7 +479,9 @@ func (d *Dataset) Mount(options string, flags int) (err error) {
err = errors.New(msgDatasetIsNil)
return
}
if ec := C.zfs_mount(d.list.zh, C.CString(options), C.int(flags)); ec != 0 {
csOptions := C.CString(options)
defer C.free(unsafe.Pointer(csOptions))
if ec := C.dataset_mount(d.list, csOptions, C.int(flags)); ec != 0 {
err = LastError()
}
return
@ -344,30 +493,36 @@ func (d *Dataset) Unmount(flags int) (err error) {
err = errors.New(msgDatasetIsNil)
return
}
if ec := C.zfs_unmount(d.list.zh, nil, C.int(flags)); ec != 0 {
if ec := C.dataset_unmount(d.list, C.int(flags)); ec != 0 {
err = LastError()
}
return
}
// Unmount this filesystem and any children inheriting the mountpoint property.
// UnmountAll unmount this filesystem and any children inheriting the
// mountpoint property.
func (d *Dataset) UnmountAll(flags int) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if ec := C.zfs_unmountall(d.list.zh, C.int(flags)); ec != 0 {
err = LastError()
// This is implemented recursively because zfs_unmountall() didn't work
if len(d.Children) > 0 {
for _, c := range d.Children {
if err = c.UnmountAll(flags); err != nil {
return
}
}
}
return
return d.Unmount(flags)
}
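The mount helpers follow the same shape. Below is a sketch that checks IsMounted(), mounts with empty options and zero flags (both are handed straight to libzfs), and unmounts again; the dataset name is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/bicomsystems/go-libzfs"
)

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer d.Close()

	if mounted, where := d.IsMounted(); mounted {
		fmt.Println("already mounted at", where)
	} else if err = d.Mount("", 0); err != nil { // empty options, no flags
		fmt.Println("mount:", err)
		return
	}

	// ... use the filesystem, then unmount it and any opened children.
	if err = d.UnmountAll(0); err != nil {
		fmt.Println("unmount:", err)
	}
}
```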
// Convert property to name
// DatasetPropertyToName convert property to name
// ( returns built in string representation of property name).
// This is optional, you can represent each property with string
// name of choice.
func (d *Dataset) PropertyToName(p ZFSProp) (name string) {
if p == ZFSNumProps {
func DatasetPropertyToName(p Prop) (name string) {
if p == DatasetNumProps {
return "numofprops"
}
prop := C.zfs_prop_t(p)

zfs.h

@ -10,22 +10,137 @@ struct dataset_list {
void *pnext;
};
typedef struct zfs_share {
uint64_t z_exportdata;
uint64_t z_sharedata;
uint64_t z_sharetype; /* 0 = share, 1 = unshare */
uint64_t z_sharemax; /* max length of share string */
} zfs_share_t;
/*
* A limited number of zpl level stats are retrievable
* with an ioctl. zfs diff is the current consumer.
*/
typedef struct zfs_stat {
uint64_t zs_gen;
uint64_t zs_mode;
uint64_t zs_links;
uint64_t zs_ctime[2];
} zfs_stat_t;
typedef struct zinject_record {
uint64_t zi_objset;
uint64_t zi_object;
uint64_t zi_start;
uint64_t zi_end;
uint64_t zi_guid;
uint32_t zi_level;
uint32_t zi_error;
uint64_t zi_type;
uint32_t zi_freq;
uint32_t zi_failfast;
char zi_func[MAXNAMELEN];
uint32_t zi_iotype;
int32_t zi_duration;
uint64_t zi_timer;
uint64_t zi_nlanes;
uint32_t zi_cmd;
uint32_t zi_pad;
} zinject_record_t;
typedef struct dmu_objset_stats {
uint64_t dds_num_clones; /* number of clones of this */
uint64_t dds_creation_txg;
uint64_t dds_guid;
dmu_objset_type_t dds_type;
uint8_t dds_is_snapshot;
uint8_t dds_inconsistent;
char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;
typedef struct zfs_cmd {
char zc_name[MAXPATHLEN]; /* name of pool or dataset */
uint64_t zc_nvlist_src; /* really (char *) */
uint64_t zc_nvlist_src_size;
uint64_t zc_nvlist_dst; /* really (char *) */
uint64_t zc_nvlist_dst_size;
boolean_t zc_nvlist_dst_filled; /* put an nvlist in dst? */
int zc_pad2;
/*
* The following members are for legacy ioctls which haven't been
* converted to the new method.
*/
uint64_t zc_history; /* really (char *) */
char zc_value[MAXPATHLEN * 2];
char zc_string[MAXNAMELEN];
uint64_t zc_guid;
uint64_t zc_nvlist_conf; /* really (char *) */
uint64_t zc_nvlist_conf_size;
uint64_t zc_cookie;
uint64_t zc_objset_type;
uint64_t zc_perm_action;
uint64_t zc_history_len;
uint64_t zc_history_offset;
uint64_t zc_obj;
uint64_t zc_iflags; /* internal to zfs(7fs) */
zfs_share_t zc_share;
dmu_objset_stats_t zc_objset_stats;
zinject_record_t zc_inject_record;
uint32_t zc_defer_destroy;
uint32_t zc_flags;
uint64_t zc_action_handle;
int zc_cleanup_fd;
uint8_t zc_simple;
uint8_t zc_pad[3]; /* alignment */
uint64_t zc_sendobj;
uint64_t zc_fromobj;
uint64_t zc_createtxg;
zfs_stat_t zc_stat;
} zfs_cmd_t;
typedef struct dataset_list dataset_list_t;
typedef struct dataset_list* dataset_list_ptr;
dataset_list_t *create_dataset_list_item();
void dataset_list_close(dataset_list_t *list);
void dataset_list_free(dataset_list_t *list);
int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first);
int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first);
dataset_list_t* dataset_list_root();
dataset_list_t* dataset_list_children(dataset_list_t *dataset);
dataset_list_t *dataset_next(dataset_list_t *dataset);
int dataset_type(dataset_list_ptr dataset);
int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop);
dataset_list_ptr dataset_open(const char *path);
int dataset_create(const char *path, zfs_type_t type, nvlist_ptr props);
int dataset_destroy(dataset_list_ptr dataset, boolean_t defer);
zpool_list_ptr dataset_get_pool(dataset_list_ptr dataset);
int dataset_prop_set(dataset_list_ptr dataset, zfs_prop_t prop, const char *value);
int dataset_user_prop_set(dataset_list_ptr dataset, const char *prop, const char *value);
int dataset_clone(dataset_list_ptr dataset, const char *target, nvlist_ptr props);
int dataset_snapshot(const char *path, boolean_t recur, nvlist_ptr props);
int dataset_rollback(dataset_list_ptr dataset, dataset_list_ptr snapshot, boolean_t force);
int dataset_promote(dataset_list_ptr dataset);
int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t recur, boolean_t force_unm);
const char* dataset_is_mounted(dataset_list_ptr dataset);
int dataset_mount(dataset_list_ptr dataset, const char *options, int flags);
int dataset_unmount(dataset_list_ptr dataset, int flags);
int dataset_unmountall(dataset_list_ptr dataset, int flags);
const char *dataset_get_name(dataset_list_ptr ds);
int clear_last_error(libzfs_handle_t *libzfs);
property_list_t *read_dataset_property(dataset_list_t *dataset, int prop);
property_list_t *read_user_property(dataset_list_t *dataset, const char* prop);
char** alloc_strings(int size);
char** alloc_cstrings(int size);
void strings_setat(char **a, int at, char *v);
sendflags_t *alloc_sendflags();
recvflags_t *alloc_recvflags();
struct zfs_cmd *new_zfs_cmd();
int estimate_send_size(struct zfs_cmd *zc);
#endif
/* SERVERWARE_ZFS_H */
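These C helpers back the Go-side dataset API; dataset_snapshot() and dataset_destroy(), for instance, surface as zfs.DatasetSnapshot and Dataset.Destroy in the tests and examples elsewhere in this diff. A minimal sketch of that path, assuming a dataset TESTPOOL/DATASET already exists (the path and snapshot name are placeholders):

package main

import zfs "github.com/bicomsystems/go-libzfs"

func main() {
	props := make(map[zfs.Prop]zfs.Property)
	// Recursive snapshot of an existing dataset (placeholder path).
	snap, err := zfs.DatasetSnapshot("TESTPOOL/DATASET@demo", true, props)
	if err != nil {
		panic(err)
	}
	defer snap.Close()
	// Destroy the snapshot again; false means no deferred destroy.
	if err = snap.Destroy(false); err != nil {
		panic(err)
	}
}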

233
zfs_test.go Normal file
View File

@ -0,0 +1,233 @@
package zfs_test
import (
"fmt"
"testing"
"github.com/bicomsystems/go-libzfs"
)
/* ------------------------------------------------------------------------- */
// HELPERS:
var TSTDatasetPath = TSTPoolName + "/DATASET"
var TSTVolumePath = TSTDatasetPath + "/VOLUME"
var TSTDatasetPathSnap = TSTDatasetPath + "@test"
func printDatasets(ds []zfs.Dataset) error {
for _, d := range ds {
path, err := d.Path()
if err != nil {
return err
}
p, err := d.GetProperty(zfs.DatasetPropType)
if err != nil {
return err
}
fmt.Printf(" %30s | %10s\n", path, p.Value)
if len(d.Children) > 0 {
printDatasets(d.Children)
}
}
return nil
}
/* ------------------------------------------------------------------------- */
// TESTS:
func zfsTestDatasetCreate(t *testing.T) {
// reinit names used in case TESTPOOL was in conflict
TSTDatasetPath = TSTPoolName + "/DATASET"
TSTVolumePath = TSTDatasetPath + "/VOLUME"
TSTDatasetPathSnap = TSTDatasetPath + "@test"
println("TEST DatasetCreate(", TSTDatasetPath, ") (filesystem) ... ")
props := make(map[zfs.Prop]zfs.Property)
d, err := zfs.DatasetCreate(TSTDatasetPath, zfs.DatasetTypeFilesystem, props)
if err != nil {
t.Error(err)
return
}
d.Close()
print("PASS\n\n")
strSize := "536870912" // 512M
println("TEST DatasetCreate(", TSTVolumePath, ") (volume) ... ")
props[zfs.DatasetPropVolsize] = zfs.Property{Value: strSize}
// In addition I explicitly choose some more properties to be set.
props[zfs.DatasetPropVolblocksize] = zfs.Property{Value: "4096"}
props[zfs.DatasetPropReservation] = zfs.Property{Value: strSize}
d, err = zfs.DatasetCreate(TSTVolumePath, zfs.DatasetTypeVolume, props)
if err != nil {
t.Error(err)
return
}
d.Close()
print("PASS\n\n")
}
func zfsTestDatasetOpen(t *testing.T) {
println("TEST DatasetOpen(", TSTDatasetPath, ") ... ")
d, err := zfs.DatasetOpen(TSTDatasetPath)
if err != nil {
t.Error(err)
return
}
defer d.Close()
print("PASS\n\n")
println("TEST Set/GetUserProperty(prop, value string) ... ")
var p zfs.Property
// Test set/get user property
if err = d.SetUserProperty("go-libzfs:test", "yes"); err != nil {
t.Error(err)
return
}
if p, err = d.GetUserProperty("go-libzfs:test"); err != nil {
t.Error(err)
return
}
println("go-libzfs:test", " = ",
p.Value)
print("PASS\n\n")
}
func zfsTestDatasetSetProperty(t *testing.T) {
println("TEST Dataset SetProp(", TSTDatasetPath, ") ... ")
d, err := zfs.DatasetOpen(TSTDatasetPath)
if err != nil {
t.Error(err)
return
}
defer d.Close()
if err = d.SetProperty(zfs.DatasetPropOverlay, "on"); err != nil {
t.Error(err)
return
}
if prop, err := d.GetProperty(zfs.DatasetPropOverlay); err != nil {
t.Error(err)
return
} else {
println(prop.Value)
if prop.Value != "on" {
t.Error(fmt.Errorf("Update of dataset property failed"))
return
}
}
print("PASS\n\n")
return
}
func zfsTestDatasetOpenAll(t *testing.T) {
println("TEST DatasetOpenAll()/DatasetCloseAll() ... ")
ds, err := zfs.DatasetOpenAll()
if err != nil {
t.Error(err)
return
}
if err = printDatasets(ds); err != nil {
zfs.DatasetCloseAll(ds)
t.Error(err)
return
}
zfs.DatasetCloseAll(ds)
print("PASS\n\n")
}
func zfsTestDatasetSnapshot(t *testing.T) {
println("TEST DatasetSnapshot(", TSTDatasetPath, ", true, ...) ... ")
props := make(map[zfs.Prop]zfs.Property)
d, err := zfs.DatasetSnapshot(TSTDatasetPathSnap, true, props)
if err != nil {
t.Error(err)
return
}
defer d.Close()
print("PASS\n\n")
}
func zfsTestDatasetDestroy(t *testing.T) {
println("TEST DATASET Destroy( ", TSTDatasetPath, " ) ... ")
d, err := zfs.DatasetOpen(TSTDatasetPath)
if err != nil {
t.Error(err)
return
}
defer d.Close()
if err = d.DestroyRecursive(); err != nil {
t.Error(err)
return
}
print("PASS\n\n")
}
/* ------------------------------------------------------------------------- */
// EXAMPLES:
// Example of creating a ZFS volume
func ExampleDatasetCreate() {
// Create a map to represent ZFS dataset properties. This is equivalent to
// the list of properties you can get from the ZFS CLI tool, plus some more
// used internally by libzfs.
props := make(map[zfs.Prop]zfs.Property)
// I choose to create a (block) volume 1 GiB in size. Size is just a ZFS dataset
// property, and properties are passed as a map of strings. So you have to either
// specify the size as a base-10 number in a string, or use the strconv package
// (or similar) to convert to a base-10 string from a numeric type.
strSize := "1073741824"
props[zfs.DatasetPropVolsize] = zfs.Property{Value: strSize}
// In addition I explicitly choose some more properties to be set.
props[zfs.DatasetPropVolblocksize] = zfs.Property{Value: "4096"}
props[zfs.DatasetPropReservation] = zfs.Property{Value: strSize}
// Let's create the desired volume
d, err := zfs.DatasetCreate("TESTPOOL/VOLUME1", zfs.DatasetTypeVolume, props)
if err != nil {
println(err.Error())
return
}
// Dataset has to be closed for memory cleanup
defer d.Close()
println("Created zfs volume TESTPOOL/VOLUME1")
}
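The size comment above is worth making concrete. A short sketch of the strconv conversion it suggests, using only wrappers already present in this file; volumeProps and the TESTPOOL/VOLUME2 path are hypothetical, not part of the package:

package main

import (
	"strconv"

	zfs "github.com/bicomsystems/go-libzfs"
)

// volumeProps renders a numeric size as the base-10 string the property map expects.
func volumeProps(sizeBytes uint64) map[zfs.Prop]zfs.Property {
	size := strconv.FormatUint(sizeBytes, 10) // e.g. "1073741824" for 1 GiB
	return map[zfs.Prop]zfs.Property{
		zfs.DatasetPropVolsize:     {Value: size},
		zfs.DatasetPropReservation: {Value: size},
	}
}

func main() {
	// TESTPOOL/VOLUME2 is a placeholder path.
	d, err := zfs.DatasetCreate("TESTPOOL/VOLUME2", zfs.DatasetTypeVolume, volumeProps(1<<30))
	if err != nil {
		println(err.Error())
		return
	}
	defer d.Close()
}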
func ExampleDatasetOpen() {
// Open dataset and read its available space
d, err := zfs.DatasetOpen("TESTPOOL/DATASET1")
if err != nil {
panic(err.Error())
}
defer d.Close()
var p zfs.Property
if p, err = d.GetProperty(zfs.DatasetPropAvailable); err != nil {
panic(err.Error())
}
println(zfs.DatasetPropertyToName(zfs.DatasetPropAvailable), " = ",
p.Value)
}
func ExampleDatasetOpenAll() {
datasets, err := zfs.DatasetOpenAll()
if err != nil {
panic(err.Error())
}
defer zfs.DatasetCloseAll(datasets)
// Print out path and type of root datasets
for _, d := range datasets {
path, err := d.Path()
if err != nil {
panic(err.Error())
}
p, err := d.GetProperty(zfs.DatasetPropType)
if err != nil {
panic(err.Error())
}
fmt.Printf("%30s | %10s\n", path, p.Value)
}
}
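zfsTestDatasetOpen above also exercises user properties; a minimal sketch of the same round trip outside the test harness (the dataset path and property name are placeholders):

package main

import zfs "github.com/bicomsystems/go-libzfs"

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET") // placeholder dataset path
	if err != nil {
		panic(err)
	}
	defer d.Close()
	// User properties live in their own "module:name" namespace.
	if err = d.SetUserProperty("go-libzfs:note", "example"); err != nil {
		panic(err)
	}
	p, err := d.GetUserProperty("go-libzfs:note")
	if err != nil {
		panic(err)
	}
	println("go-libzfs:note =", p.Value)
}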

588
zpool.c
View File

@ -1,14 +1,89 @@
/* C wrappers around some zfs calls and C in general that should simplify
* using libzfs from the Go language, and make Go code shorter and more readable.
*/
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "common.h"
#include "zpool.h"
char *sZPOOL_CONFIG_VERSION = ZPOOL_CONFIG_VERSION;
char *sZPOOL_CONFIG_POOL_NAME = ZPOOL_CONFIG_POOL_NAME;
char *sZPOOL_CONFIG_POOL_STATE = ZPOOL_CONFIG_POOL_STATE;
char *sZPOOL_CONFIG_POOL_TXG = ZPOOL_CONFIG_POOL_TXG;
char *sZPOOL_CONFIG_POOL_GUID = ZPOOL_CONFIG_POOL_GUID;
char *sZPOOL_CONFIG_CREATE_TXG = ZPOOL_CONFIG_CREATE_TXG;
char *sZPOOL_CONFIG_TOP_GUID = ZPOOL_CONFIG_TOP_GUID;
char *sZPOOL_CONFIG_VDEV_TREE = ZPOOL_CONFIG_VDEV_TREE;
char *sZPOOL_CONFIG_TYPE = ZPOOL_CONFIG_TYPE;
char *sZPOOL_CONFIG_CHILDREN = ZPOOL_CONFIG_CHILDREN;
char *sZPOOL_CONFIG_ID = ZPOOL_CONFIG_ID;
char *sZPOOL_CONFIG_GUID = ZPOOL_CONFIG_GUID;
char *sZPOOL_CONFIG_PATH = ZPOOL_CONFIG_PATH;
char *sZPOOL_CONFIG_DEVID = ZPOOL_CONFIG_DEVID;
char *sZPOOL_CONFIG_METASLAB_ARRAY = ZPOOL_CONFIG_METASLAB_ARRAY;
char *sZPOOL_CONFIG_METASLAB_SHIFT = ZPOOL_CONFIG_METASLAB_SHIFT;
char *sZPOOL_CONFIG_ASHIFT = ZPOOL_CONFIG_ASHIFT;
char *sZPOOL_CONFIG_ASIZE = ZPOOL_CONFIG_ASIZE;
char *sZPOOL_CONFIG_DTL = ZPOOL_CONFIG_DTL;
char *sZPOOL_CONFIG_SCAN_STATS = ZPOOL_CONFIG_SCAN_STATS;
char *sZPOOL_CONFIG_VDEV_STATS = ZPOOL_CONFIG_VDEV_STATS;
char *sZPOOL_CONFIG_WHOLE_DISK = ZPOOL_CONFIG_WHOLE_DISK;
char *sZPOOL_CONFIG_ERRCOUNT = ZPOOL_CONFIG_ERRCOUNT;
char *sZPOOL_CONFIG_NOT_PRESENT = ZPOOL_CONFIG_NOT_PRESENT;
char *sZPOOL_CONFIG_SPARES = ZPOOL_CONFIG_SPARES;
char *sZPOOL_CONFIG_IS_SPARE = ZPOOL_CONFIG_IS_SPARE;
char *sZPOOL_CONFIG_NPARITY = ZPOOL_CONFIG_NPARITY;
char *sZPOOL_CONFIG_HOSTID = ZPOOL_CONFIG_HOSTID;
char *sZPOOL_CONFIG_HOSTNAME = ZPOOL_CONFIG_HOSTNAME;
char *sZPOOL_CONFIG_LOADED_TIME = ZPOOL_CONFIG_LOADED_TIME;
char *sZPOOL_CONFIG_UNSPARE = ZPOOL_CONFIG_UNSPARE;
char *sZPOOL_CONFIG_PHYS_PATH = ZPOOL_CONFIG_PHYS_PATH;
char *sZPOOL_CONFIG_IS_LOG = ZPOOL_CONFIG_IS_LOG;
char *sZPOOL_CONFIG_L2CACHE = ZPOOL_CONFIG_L2CACHE;
char *sZPOOL_CONFIG_HOLE_ARRAY = ZPOOL_CONFIG_HOLE_ARRAY;
char *sZPOOL_CONFIG_VDEV_CHILDREN = ZPOOL_CONFIG_VDEV_CHILDREN;
char *sZPOOL_CONFIG_IS_HOLE = ZPOOL_CONFIG_IS_HOLE;
char *sZPOOL_CONFIG_DDT_HISTOGRAM = ZPOOL_CONFIG_DDT_HISTOGRAM;
char *sZPOOL_CONFIG_DDT_OBJ_STATS = ZPOOL_CONFIG_DDT_OBJ_STATS;
char *sZPOOL_CONFIG_DDT_STATS = ZPOOL_CONFIG_DDT_STATS;
char *sZPOOL_CONFIG_SPLIT = ZPOOL_CONFIG_SPLIT;
char *sZPOOL_CONFIG_ORIG_GUID = ZPOOL_CONFIG_ORIG_GUID;
char *sZPOOL_CONFIG_SPLIT_GUID = ZPOOL_CONFIG_SPLIT_GUID;
char *sZPOOL_CONFIG_SPLIT_LIST = ZPOOL_CONFIG_SPLIT_LIST;
char *sZPOOL_CONFIG_REMOVING = ZPOOL_CONFIG_REMOVING;
char *sZPOOL_CONFIG_RESILVER_TXG = ZPOOL_CONFIG_RESILVER_TXG;
char *sZPOOL_CONFIG_COMMENT = ZPOOL_CONFIG_COMMENT;
char *sZPOOL_CONFIG_SUSPENDED = ZPOOL_CONFIG_SUSPENDED;
char *sZPOOL_CONFIG_TIMESTAMP = ZPOOL_CONFIG_TIMESTAMP;
char *sZPOOL_CONFIG_BOOTFS = ZPOOL_CONFIG_BOOTFS;
char *sZPOOL_CONFIG_MISSING_DEVICES = ZPOOL_CONFIG_MISSING_DEVICES;
char *sZPOOL_CONFIG_LOAD_INFO = ZPOOL_CONFIG_LOAD_INFO;
char *sZPOOL_CONFIG_REWIND_INFO = ZPOOL_CONFIG_REWIND_INFO;
char *sZPOOL_CONFIG_UNSUP_FEAT = ZPOOL_CONFIG_UNSUP_FEAT;
char *sZPOOL_CONFIG_ENABLED_FEAT = ZPOOL_CONFIG_ENABLED_FEAT;
char *sZPOOL_CONFIG_CAN_RDONLY = ZPOOL_CONFIG_CAN_RDONLY;
char *sZPOOL_CONFIG_FEATURES_FOR_READ = ZPOOL_CONFIG_FEATURES_FOR_READ;
char *sZPOOL_CONFIG_FEATURE_STATS = ZPOOL_CONFIG_FEATURE_STATS;
char *sZPOOL_CONFIG_ERRATA = ZPOOL_CONFIG_ERRATA;
char *sZPOOL_CONFIG_OFFLINE = ZPOOL_CONFIG_OFFLINE;
char *sZPOOL_CONFIG_FAULTED = ZPOOL_CONFIG_FAULTED;
char *sZPOOL_CONFIG_DEGRADED = ZPOOL_CONFIG_DEGRADED;
char *sZPOOL_CONFIG_REMOVED = ZPOOL_CONFIG_REMOVED;
char *sZPOOL_CONFIG_FRU = ZPOOL_CONFIG_FRU;
char *sZPOOL_CONFIG_AUX_STATE = ZPOOL_CONFIG_AUX_STATE;
char *sZPOOL_REWIND_POLICY = ZPOOL_REWIND_POLICY;
char *sZPOOL_REWIND_REQUEST = ZPOOL_REWIND_REQUEST;
char *sZPOOL_REWIND_REQUEST_TXG = ZPOOL_REWIND_REQUEST_TXG;
char *sZPOOL_REWIND_META_THRESH = ZPOOL_REWIND_META_THRESH;
char *sZPOOL_REWIND_DATA_THRESH = ZPOOL_REWIND_DATA_THRESH;
char *sZPOOL_CONFIG_LOAD_TIME = ZPOOL_CONFIG_LOAD_TIME;
char *sZPOOL_CONFIG_LOAD_DATA_ERRORS = ZPOOL_CONFIG_LOAD_DATA_ERRORS;
char *sZPOOL_CONFIG_REWIND_TIME = ZPOOL_CONFIG_REWIND_TIME;
static char _lasterr_[1024];
const char *lasterr(void) {
@ -35,26 +110,24 @@ int zpool_list_callb(zpool_handle_t *pool, void *data) {
return 0;
}
int zpool_list(libzfs_handle_t *libzfs, zpool_list_t **first) {
zpool_list_ptr zpool_list_openall() {
int err = 0;
zpool_list_t *zlist = create_zpool_list_item();
err = zpool_iter(libzfs, zpool_list_callb, &zlist);
if ( zlist->zph ) {
*first = zlist;
} else {
*first = 0;
free(zlist);
err = zpool_iter(libzfsHandle, zpool_list_callb, &zlist);
if ( err != 0 || zlist->zph == NULL ) {
zpool_list_free(zlist);
zlist = NULL;
}
return err;
return zlist;
}
zpool_list_t* zpool_list_open(libzfs_handle_t *libzfs, const char *name) {
zpool_list_t* zpool_list_open(const char *name) {
zpool_list_t *zlist = create_zpool_list_item();
zlist->zph = zpool_open(libzfs, name);
zlist->zph = zpool_open(libzfsHandle, name);
if ( zlist->zph ) {
return zlist;
} else {
free(zlist);
zpool_list_free(zlist);
}
return 0;
}
@ -63,26 +136,18 @@ zpool_list_t *zpool_next(zpool_list_t *pool) {
return pool->pnext;
}
void zpool_list_free(zpool_list_t *list) {
zpool_list_ptr next;
while(list) {
next = list->pnext;
free(list);
list = next;
}
}
void zpool_list_close(zpool_list_t *pool) {
zpool_close(pool->zph);
free(pool);
}
property_list_t *new_property_list() {
property_list_t *r = malloc(sizeof(property_list_t));
memset(r, 0, sizeof(property_list_t));
return r;
}
void free_properties(property_list_t *root) {
if (root != 0) {
property_list_t *tmp = 0;
do {
tmp = root->pnext;
free(root);
root = tmp;
} while(tmp);
}
zpool_list_free(pool);
}
property_list_t *next_property(property_list_t *list) {
@ -112,166 +177,73 @@ void zprop_source_tostr(char *dst, zprop_source_t source) {
break;
default:
strcpy(dst, "default");
break;
break;
}
}
int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop) {
property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop) {
int r = 0;
zprop_source_t source;
property_list_ptr list = new_property_list();
r = zpool_get_prop(zh, prop,
r = zpool_get_prop(pool->zph, prop,
list->value, INT_MAX_VALUE, &source);
if (r == 0) {
// strcpy(list->name, zpool_prop_to_name(prop));
zprop_source_tostr(list->source, source);
} else {
free_properties(list);
return NULL;
}
list->property = (int)prop;
return r;
return list;
}
int read_append_zpool_property(zpool_handle_t *zh, property_list_t **proot,
zpool_prop_t prop) {
property_list_ptr read_append_zpool_property(zpool_list_ptr pool, property_list_ptr proot, zpool_prop_t prop) {
int r = 0;
property_list_t *newitem = NULL, *root = *proot;
newitem = new_property_list();
property_list_t *newitem = NULL;
r = read_zpool_property(zh, newitem, prop);
// printf("p: %s %s %s\n", newitem->name, newitem->value, newitem->source);
newitem->pnext = root;
*proot = root = newitem;
if (r != 0) {
free_properties(root);
*proot = NULL;
newitem = read_zpool_property(pool, prop);
if (newitem == NULL) {
return proot;
}
return r;
// printf("p: %s %s %s\n", newitem->name, newitem->value, newitem->source);
newitem->pnext = proot;
proot = newitem;
return proot;
}
property_list_t *read_zpool_properties(zpool_handle_t *zh) {
property_list_t *read_zpool_properties(zpool_list_ptr pool) {
// read pool name as first property
property_list_t *root = NULL, *list = NULL;
int r = read_append_zpool_property(zh, &root, ZPOOL_PROP_NAME);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_SIZE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_CAPACITY);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ALTROOT);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_HEALTH);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_GUID);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_VERSION);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_BOOTFS);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DELEGATION);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_AUTOREPLACE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_CACHEFILE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FAILUREMODE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_LISTSNAPS);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_AUTOEXPAND);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DEDUPDITTO);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DEDUPRATIO);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FREE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ALLOCATED);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_READONLY);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ASHIFT);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_COMMENT);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_EXPANDSZ);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FREEING);
if (r != 0) {
return 0;
}
root = read_append_zpool_property(pool, root, ZPOOL_PROP_NAME);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_SIZE);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_CAPACITY);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_ALTROOT);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_HEALTH);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_GUID);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_VERSION);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_BOOTFS);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_DELEGATION);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_AUTOREPLACE);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_CACHEFILE);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_FAILUREMODE);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_LISTSNAPS);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_AUTOEXPAND);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_DEDUPDITTO);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_DEDUPRATIO);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_FREE);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_ALLOCATED);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_READONLY);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_ASHIFT);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_COMMENT);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_EXPANDSZ);
root = read_append_zpool_property(pool, root, ZPOOL_PROP_FREEING);
list = new_property_list();
@ -296,75 +268,75 @@ const char *gettext(const char *txt) {
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop) {
zpool_prop_t prop = ZPROP_INVAL;
zfs_prop_t fprop;
nvlist_t *proplist;
const char *normnm;
char *strval;
// int
// add_prop_list(const char *propname, char *propval, nvlist_t **props,
// boolean_t poolprop) {
// zpool_prop_t prop = ZPROP_INVAL;
// zfs_prop_t fprop;
// nvlist_t *proplist;
// const char *normnm;
// char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) snprintf(_lasterr_, 1024, "internal error: out of memory");
return (1);
}
// if (*props == NULL &&
// nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
// (void) snprintf(_lasterr_, 1024, "internal error: out of memory");
// return (1);
// }
proplist = *props;
// proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
// if (poolprop) {
// const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL &&
!zpool_prop_feature(propname)) {
(void) snprintf(_lasterr_, 1024, "property '%s' is "
"not a valid pool property", propname);
return (2);
}
// if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL &&
// !zpool_prop_feature(propname)) {
// (void) snprintf(_lasterr_, 1024, "property '%s' is "
// "not a valid pool property", propname);
// return (2);
// }
/*
* feature@ properties and version should not be specified
* at the same time.
*/
// if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) &&
// nvlist_exists(proplist, vname)) ||
// (prop == ZPOOL_PROP_VERSION &&
// prop_list_contains_feature(proplist))) {
// (void) fprintf(stderr, gettext("'feature@' and "
// "'version' properties cannot be specified "
// "together\n"));
// return (2);
// }
// /*
// * feature@ properties and version should not be specified
// * at the same time.
// */
// // if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) &&
// // nvlist_exists(proplist, vname)) ||
// // (prop == ZPOOL_PROP_VERSION &&
// // prop_list_contains_feature(proplist))) {
// // (void) fprintf(stderr, gettext("'feature@' and "
// // "'version' properties cannot be specified "
// // "together\n"));
// // return (2);
// // }
if (zpool_prop_feature(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
normnm = zfs_prop_to_name(fprop);
} else {
normnm = propname;
}
}
// if (zpool_prop_feature(propname))
// normnm = propname;
// else
// normnm = zpool_prop_to_name(prop);
// } else {
// if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
// normnm = zfs_prop_to_name(fprop);
// } else {
// normnm = propname;
// }
// }
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) snprintf(_lasterr_, 1024, "property '%s' "
"specified multiple times", propname);
return (2);
}
// if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
// prop != ZPOOL_PROP_CACHEFILE) {
// (void) snprintf(_lasterr_, 1024, "property '%s' "
// "specified multiple times", propname);
// return (2);
// }
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) snprintf(_lasterr_, 1024, "internal "
"error: out of memory\n");
return (1);
}
// if (nvlist_add_string(proplist, normnm, propval) != 0) {
// (void) snprintf(_lasterr_, 1024, "internal "
// "error: out of memory\n");
// return (1);
// }
return (0);
}
// return (0);
// }
nvlist_t** nvlist_alloc_array(int count) {
return malloc(count*sizeof(nvlist_t*));
@ -378,6 +350,166 @@ void nvlist_free_array(nvlist_t **a) {
free(a);
}
void free_cstring(char *str) {
free(str);
nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i) {
return a[i];
}
int refresh_stats(zpool_list_t *pool)
{
boolean_t missing;
int err = zpool_refresh_stats(pool->zph, &missing);
if ( err != 0 ) {
return err;
}
if ( missing == B_TRUE ) {
return -1;
}
return 0;
}
const char *get_vdev_type(nvlist_ptr nv) {
char *value = NULL;
int r = nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &value);
if(r != 0) {
return NULL;
}
return value;
}
const vdev_stat_ptr get_vdev_stats(nvlist_ptr nv) {
vdev_stat_ptr vs = NULL;
uint_t count;
int r = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, (uint64_t**)&vs, &count);
if(r != 0) {
return NULL;
}
return vs;
}
pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv) {
pool_scan_stat_ptr vds = NULL;
uint_t c;
int r = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS, (uint64_t**)&vds, &c);
if(r != 0) {
return NULL;
}
return vds;
}
vdev_children_ptr get_vdev_children(nvlist_t *nv) {
int r;
vdev_children_ptr children = malloc(sizeof(vdev_children_t));
memset(children, 0, sizeof(vdev_children_t));
r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &(children->first), &(children->count));
if (r != 0) {
free(children);
return NULL;
}
return children;
}
vdev_children_ptr get_vdev_spares(nvlist_t *nv) {
int r;
vdev_children_ptr children = malloc(sizeof(vdev_children_t));
memset(children, 0, sizeof(vdev_children_t));
r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &(children->first), &(children->count));
if (r != 0) {
free(children);
return NULL;
}
return children;
}
vdev_children_ptr get_vdev_l2cache(nvlist_t *nv) {
int r;
vdev_children_ptr children = malloc(sizeof(vdev_children_t));
memset(children, 0, sizeof(vdev_children_t));
r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, &(children->first), &(children->count));
if (r != 0) {
free(children);
return NULL;
}
return children;
}
const char *get_vdev_path(nvlist_ptr nv) {
char *path = NULL;
uint64_t notpresent = 0;
int r = nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &notpresent);
if (r == 0 || notpresent != 0) {
if ( 0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) ) {
return NULL;
}
}
return path;
}
uint64_t get_vdev_is_log(nvlist_ptr nv) {
uint64_t islog = B_FALSE;
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
return islog;
}
// return
uint64_t get_zpool_state(nvlist_ptr nv) {
uint64_t state = 0;
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_POOL_STATE, &state);
return state;
}
uint64_t get_zpool_guid(nvlist_ptr nv) {
uint64_t guid = 0;
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_POOL_GUID, &guid);
return guid;
}
const char *get_zpool_name(nvlist_ptr nv) {
char *name = NULL;
if (0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_POOL_NAME, &name)) {
return NULL;
}
return name;
}
const char *get_zpool_comment(nvlist_ptr nv) {
char *comment = NULL;
if (0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_COMMENT, &comment)) {
return NULL;
}
return comment;
}
nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv) {
nvlist_ptr vdev_tree = NULL;
if ( 0 != nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) ) {
return NULL;
}
return vdev_tree;
}
nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan) {
importargs_t idata = { 0 };
idata.path = path;
idata.paths = paths;
// idata.scan = 0;
return zpool_search_import(zfsh, &idata);
}
int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy) {
nvlist_t *policy = NULL;
int ret = 0;
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
return (1);
if (zpool_clear(pool->zph, device, policy) != 0)
ret = 1;
nvlist_free(policy);
return (ret);
}

993
zpool.go

File diff suppressed because it is too large

141
zpool.h
View File

@ -5,54 +5,159 @@
#ifndef SERVERWARE_ZPOOL_H
#define SERVERWARE_ZPOOL_H
#define INT_MAX_NAME 256
#define INT_MAX_VALUE 1024
/* Rewind request information */
#define ZPOOL_NO_REWIND 1 /* No policy - default behavior */
#define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */
#define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */
#define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */
#define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */
#define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */
#define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */
struct zpool_list {
zpool_handle_t *zph;
void *pnext;
};
typedef struct property_list {
char value[INT_MAX_VALUE];
char source[INT_MAX_NAME];
int property;
void *pnext;
} property_list_t;
struct vdev_children {
nvlist_t **first;
uint_t count;
};
typedef struct zpool_list zpool_list_t;
typedef struct zpool_list* zpool_list_ptr;
typedef struct vdev_children vdev_children_t;
typedef struct vdev_children* vdev_children_ptr;
property_list_t *new_property_list();
typedef struct pool_scan_stat* pool_scan_stat_ptr;
zpool_list_t *create_zpool_list_item();
void zprop_source_tostr(char *dst, zprop_source_t source);
zpool_list_t* zpool_list_open(libzfs_handle_t *libzfs, const char *name);
int zpool_list(libzfs_handle_t *libzfs, zpool_list_t **first);
zpool_list_t* zpool_list_open(const char *name);
zpool_list_ptr zpool_list_openall();
zpool_list_t *zpool_next(zpool_list_t *pool);
void zpool_list_free(zpool_list_t *list);
void zpool_list_close(zpool_list_t *pool);
int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop);
property_list_t *read_zpool_properties(zpool_handle_t *zh);
property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop);
property_list_t *read_zpool_properties(zpool_list_ptr pool);
property_list_t *next_property(property_list_t *list);
void free_properties(property_list_t *root);
pool_state_t zpool_read_state(zpool_handle_t *zh);
const char *lasterr(void);
int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop);
// int
// add_prop_list(const char *propname, char *propval, nvlist_t **props,
// boolean_t poolprop);
nvlist_t** nvlist_alloc_array(int count);
void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item);
void nvlist_free_array(nvlist_t **a);
nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i);
int refresh_stats(zpool_list_t *pool);
const char *get_vdev_type(nvlist_ptr nv);
const vdev_stat_ptr get_vdev_stats(nvlist_ptr nv);
pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv);
vdev_children_ptr get_vdev_children(nvlist_t *nv);
vdev_children_ptr get_vdev_spares(nvlist_t *nv);
vdev_children_ptr get_vdev_l2cache(nvlist_t *nv);
const char *get_vdev_path(nvlist_ptr nv);
uint64_t get_vdev_is_log(nvlist_ptr nv);
uint64_t get_zpool_state(nvlist_ptr nv);
uint64_t get_zpool_guid(nvlist_ptr nv);
const char *get_zpool_name(nvlist_ptr nv);
const char *get_zpool_comment(nvlist_ptr nv);
nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv);
nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan);
__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags);
int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force);
int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy);
void free_cstring(char *str);
extern char *sZPOOL_CONFIG_VERSION;
extern char *sZPOOL_CONFIG_POOL_NAME;
extern char *sZPOOL_CONFIG_POOL_STATE;
extern char *sZPOOL_CONFIG_POOL_TXG;
extern char *sZPOOL_CONFIG_POOL_GUID;
extern char *sZPOOL_CONFIG_CREATE_TXG;
extern char *sZPOOL_CONFIG_TOP_GUID;
extern char *sZPOOL_CONFIG_VDEV_TREE;
extern char *sZPOOL_CONFIG_TYPE;
extern char *sZPOOL_CONFIG_CHILDREN;
extern char *sZPOOL_CONFIG_ID;
extern char *sZPOOL_CONFIG_GUID;
extern char *sZPOOL_CONFIG_PATH;
extern char *sZPOOL_CONFIG_DEVID;
extern char *sZPOOL_CONFIG_METASLAB_ARRAY;
extern char *sZPOOL_CONFIG_METASLAB_SHIFT;
extern char *sZPOOL_CONFIG_ASHIFT;
extern char *sZPOOL_CONFIG_ASIZE;
extern char *sZPOOL_CONFIG_DTL;
extern char *sZPOOL_CONFIG_SCAN_STATS;
extern char *sZPOOL_CONFIG_VDEV_STATS;
extern char *sZPOOL_CONFIG_WHOLE_DISK;
extern char *sZPOOL_CONFIG_ERRCOUNT;
extern char *sZPOOL_CONFIG_NOT_PRESENT;
extern char *sZPOOL_CONFIG_SPARES;
extern char *sZPOOL_CONFIG_IS_SPARE;
extern char *sZPOOL_CONFIG_NPARITY;
extern char *sZPOOL_CONFIG_HOSTID;
extern char *sZPOOL_CONFIG_HOSTNAME;
extern char *sZPOOL_CONFIG_LOADED_TIME;
extern char *sZPOOL_CONFIG_UNSPARE;
extern char *sZPOOL_CONFIG_PHYS_PATH;
extern char *sZPOOL_CONFIG_IS_LOG;
extern char *sZPOOL_CONFIG_L2CACHE;
extern char *sZPOOL_CONFIG_HOLE_ARRAY;
extern char *sZPOOL_CONFIG_VDEV_CHILDREN;
extern char *sZPOOL_CONFIG_IS_HOLE;
extern char *sZPOOL_CONFIG_DDT_HISTOGRAM;
extern char *sZPOOL_CONFIG_DDT_OBJ_STATS;
extern char *sZPOOL_CONFIG_DDT_STATS;
extern char *sZPOOL_CONFIG_SPLIT;
extern char *sZPOOL_CONFIG_ORIG_GUID;
extern char *sZPOOL_CONFIG_SPLIT_GUID;
extern char *sZPOOL_CONFIG_SPLIT_LIST;
extern char *sZPOOL_CONFIG_REMOVING;
extern char *sZPOOL_CONFIG_RESILVER_TXG;
extern char *sZPOOL_CONFIG_COMMENT;
extern char *sZPOOL_CONFIG_SUSPENDED;
extern char *sZPOOL_CONFIG_TIMESTAMP;
extern char *sZPOOL_CONFIG_BOOTFS;
extern char *sZPOOL_CONFIG_MISSING_DEVICES;
extern char *sZPOOL_CONFIG_LOAD_INFO;
extern char *sZPOOL_CONFIG_REWIND_INFO;
extern char *sZPOOL_CONFIG_UNSUP_FEAT;
extern char *sZPOOL_CONFIG_ENABLED_FEAT;
extern char *sZPOOL_CONFIG_CAN_RDONLY;
extern char *sZPOOL_CONFIG_FEATURES_FOR_READ;
extern char *sZPOOL_CONFIG_FEATURE_STATS;
extern char *sZPOOL_CONFIG_ERRATA;
extern char *sZPOOL_CONFIG_OFFLINE;
extern char *sZPOOL_CONFIG_FAULTED;
extern char *sZPOOL_CONFIG_DEGRADED;
extern char *sZPOOL_CONFIG_REMOVED;
extern char *sZPOOL_CONFIG_FRU;
extern char *sZPOOL_CONFIG_AUX_STATE;
extern char *sZPOOL_REWIND_POLICY;
extern char *sZPOOL_REWIND_REQUEST;
extern char *sZPOOL_REWIND_REQUEST_TXG;
extern char *sZPOOL_REWIND_META_THRESH;
extern char *sZPOOL_REWIND_DATA_THRESH;
extern char *sZPOOL_CONFIG_LOAD_TIME;
extern char *sZPOOL_CONFIG_LOAD_DATA_ERRORS;
extern char *sZPOOL_CONFIG_REWIND_TIME;
#endif
/* SERVERWARE_ZPOOL_H */
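The vdev accessors declared above (get_vdev_children, get_vdev_spares, get_vdev_l2cache, get_vdev_stats) feed Pool.VDevTree on the Go side. A minimal sketch of walking that tree, in the spirit of the printVDevTree helper in the tests further down; walk is a hypothetical helper and TESTPOOL a placeholder pool name:

package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs"
)

// walk prints one line per vdev and recurses into child devices.
func walk(vt zfs.VDevTree, indent string) {
	fmt.Printf("%s%s | %s | %s\n", indent, vt.Name, vt.Type, vt.Path)
	for _, child := range vt.Devices {
		walk(child, indent+"  ")
	}
}

func main() {
	pool, err := zfs.PoolOpen("TESTPOOL") // placeholder pool name
	if err != nil {
		panic(err)
	}
	defer pool.Close()
	root, err := pool.VDevTree()
	if err != nil {
		panic(err)
	}
	walk(root, "")
}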

zpool_test.go
View File

@ -1,17 +1,20 @@
package zfs
package zfs_test
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strconv"
"testing"
"github.com/bicomsystems/go-libzfs"
)
const (
TST_POOL_NAME = "TESTPOOL"
TST_DATASET_PATH = "TESTPOOL/DATASET"
)
/* ------------------------------------------------------------------------- */
// HELPERS:
var TSTPoolName = "TESTPOOL"
var TSTPoolGUID string
func CreateTmpSparse(prefix string, size int64) (path string, err error) {
sf, err := ioutil.TempFile("/tmp", prefix)
@ -26,51 +29,85 @@ func CreateTmpSparse(prefix string, size int64) (path string, err error) {
return
}
// Create 3 sparse file 5G in /tmp directory each 5G size, and use them to create mirror TESTPOOL with one spare "disk"
func TestPoolCreate(t *testing.T) {
print("TEST PoolCreate ... ")
var s1path, s2path, s3path string
var err error
var s1path, s2path, s3path string
// This will create sparse files in the /tmp directory,
// for the purpose of creating a test pool.
func createTestpoolVdisks() (err error) {
if s1path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil {
t.Error(err)
return
}
if s2path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil {
// try cleanup
os.Remove(s1path)
t.Error(err)
return
}
if s3path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil {
// try cleanup
os.Remove(s1path)
os.Remove(s2path)
return
}
return
}
// Cleanup sparse files used for tests
func cleanupVDisks() {
// try cleanup
os.Remove(s1path)
os.Remove(s2path)
os.Remove(s3path)
}
/* ------------------------------------------------------------------------- */
// TESTS:
// Create 3 sparse files in the /tmp directory, each 5G in size, and use them to create
// mirror TESTPOOL with one spare "disk"
func zpoolTestPoolCreate(t *testing.T) {
println("TEST PoolCreate ... ")
// first check if a pool with the same name already exists
// we don't want a conflict
for {
p, err := zfs.PoolOpen(TSTPoolName)
if err != nil {
break
}
p.Close()
TSTPoolName += "0"
}
var err error
if err = createTestpoolVdisks(); err != nil {
t.Error(err)
return
}
disks := [2]string{s1path, s2path}
var vdevs, mdevs, sdevs []VDevSpec
var vdev zfs.VDevTree
var vdevs, mdevs, sdevs []zfs.VDevTree
for _, d := range disks {
mdevs = append(mdevs,
VDevSpec{Type: VDevTypeFile, Path: d})
zfs.VDevTree{Type: zfs.VDevTypeFile, Path: d})
}
sdevs = []VDevSpec{
{Type: VDevTypeFile, Path: s3path}}
vdevs = []VDevSpec{
VDevSpec{Type: VDevTypeMirror, Devices: mdevs},
VDevSpec{Type: VDevTypeSpare, Devices: sdevs},
sdevs = []zfs.VDevTree{
{Type: zfs.VDevTypeFile, Path: s3path}}
vdevs = []zfs.VDevTree{
zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs},
}
vdev.Devices = vdevs
vdev.Spares = sdevs
props := make(map[PoolProp]string)
fsprops := make(map[ZFSProp]string)
props := make(map[zfs.Prop]string)
fsprops := make(map[zfs.Prop]string)
features := make(map[string]string)
fsprops[ZFSPropMountpoint] = "none"
features["async_destroy"] = "enabled"
features["empty_bpobj"] = "enabled"
features["lz4_compress"] = "enabled"
fsprops[zfs.DatasetPropMountpoint] = "none"
features["async_destroy"] = zfs.FENABLED
features["empty_bpobj"] = zfs.FENABLED
features["lz4_compress"] = zfs.FENABLED
pool, err := PoolCreate(TST_POOL_NAME, vdevs, features, props, fsprops)
pool, err := zfs.PoolCreate(TSTPoolName, vdev, features, props, fsprops)
if err != nil {
t.Error(err)
// try cleanup
@ -80,19 +117,19 @@ func TestPoolCreate(t *testing.T) {
return
}
defer pool.Close()
// try cleanup
os.Remove(s1path)
os.Remove(s2path)
os.Remove(s3path)
println("PASS")
pguid, _ := pool.GetProperty(zfs.PoolPropGUID)
TSTPoolGUID = pguid.Value
print("PASS\n\n")
}
// Open and list all pools and their state on the system
// Then list properties of the last pool in the list
func TestPoolOpenAll(t *testing.T) {
func zpoolTestPoolOpenAll(t *testing.T) {
println("TEST PoolOpenAll() ... ")
var pname string
pools, err := PoolOpenAll()
pools, err := zfs.PoolOpenAll()
if err != nil {
t.Error(err)
return
@ -114,152 +151,235 @@ func TestPoolOpenAll(t *testing.T) {
println("\tPool: ", pname, " state: ", pstate)
p.Close()
}
if len(pname) > 0 {
// test open on last pool
println("\tTry to open pool ", pname)
p, err := PoolOpen(pname)
if err != nil {
t.Error(err)
return
}
println("\tOpen pool: ", pname, " success")
println("\t", pname, " PROPERTIES:")
pc, _ := strconv.Atoi(p.Properties[PoolNumProps].Value)
if len(p.Properties) != (pc + 1) {
p.Close()
t.Error(fmt.Sprint("Number of zpool properties does not match ",
len(p.Properties), " != ", pc+1))
return
}
for key, value := range p.Properties {
pkey := PoolProp(key)
println("\t\t", p.PropertyToName(pkey), " = ", value.Value, " <- ", value.Source)
}
for key, value := range p.Features {
fmt.Printf("\t feature@%s = %s <- local\n", key, value)
}
if p.Properties[PoolPropListsnaps].Value == "off" {
println("\tlistsnapshots to on")
if err = p.SetProperty(PoolPropListsnaps, "on"); err != nil {
t.Error(err)
}
} else {
println("\tlistsnapshots to off")
if err = p.SetProperty(PoolPropListsnaps, "off"); err != nil {
t.Error(err)
}
}
if err == nil {
println("\tlistsnapshots", "is changed to ",
p.Properties[PoolPropListsnaps].Value, " <- ",
p.Properties[PoolPropListsnaps].Source)
}
p.Close()
}
println("PASS")
print("PASS\n\n")
}
func TestDatasetCreate(t *testing.T) {
print("TEST DatasetCreate(", TST_DATASET_PATH, ") ... ")
props := make(map[ZFSProp]Property)
d, err := DatasetCreate(TST_DATASET_PATH, DatasetTypeFilesystem, props)
if err != nil {
t.Error(err)
return
}
d.Close()
println("PASS")
}
func TestDatasetOpen(t *testing.T) {
print("TEST DatasetOpen(", TST_DATASET_PATH, ") ... ")
d, err := DatasetOpen(TST_DATASET_PATH)
if err != nil {
t.Error(err)
return
}
d.Close()
println("PASS")
}
func printDatasets(ds []Dataset) error {
for _, d := range ds {
path, err := d.Path()
if err != nil {
return err
}
println("\t", path)
if len(d.Children) > 0 {
printDatasets(d.Children)
}
}
return nil
}
func TestDatasetOpenAll(t *testing.T) {
println("TEST DatasetOpenAll()/DatasetCloseAll() ... ")
ds, err := DatasetOpenAll()
if err != nil {
t.Error(err)
return
}
if err = printDatasets(ds); err != nil {
DatasetCloseAll(ds)
t.Error(err)
return
}
DatasetCloseAll(ds)
println("PASS")
}
func TestDatasetDestroy(t *testing.T) {
print("TEST DATASET Destroy()", TST_DATASET_PATH, " ... ")
d, err := DatasetOpen(TST_DATASET_PATH)
if err != nil {
t.Error(err)
return
}
defer d.Close()
if err = d.Destroy(false); err != nil {
t.Error(err)
return
}
println("PASS")
}
func TestPoolDestroy(t *testing.T) {
print("TEST POOL Destroy()", TST_POOL_NAME, " ... ")
p, err := PoolOpen(TST_POOL_NAME)
func zpoolTestPoolDestroy(t *testing.T) {
println("TEST POOL Destroy( ", TSTPoolName, " ) ... ")
p, err := zfs.PoolOpen(TSTPoolName)
if err != nil {
t.Error(err)
return
}
defer p.Close()
if err = p.Destroy("Test of pool destroy (" + TST_POOL_NAME + ")"); err != nil {
if err = p.Destroy(TSTPoolName); err != nil {
t.Error(err.Error())
return
}
println("PASS")
print("PASS\n\n")
}
func TestFailPoolOpen(t *testing.T) {
print("TEST failing to open pool ... ")
func zpoolTestFailPoolOpen(t *testing.T) {
println("TEST open of non existing pool ... ")
pname := "fail to open this pool"
p, err := PoolOpen(pname)
p, err := zfs.PoolOpen(pname)
if err != nil {
println("PASS")
print("PASS\n\n")
return
}
t.Error("PoolOpen pass when it should fail")
p.Close()
}
func ExamplePoolProp() {
if pool, err := PoolOpen("SSD"); err == nil {
print("Pool size is: ", pool.Properties[PoolPropSize].Value)
func zpoolTestExport(t *testing.T) {
println("TEST POOL Export( ", TSTPoolName, " ) ... ")
p, err := zfs.PoolOpen(TSTPoolName)
if err != nil {
t.Error(err)
return
}
p.Export(false, "Test exporting pool")
defer p.Close()
print("PASS\n\n")
}
func zpoolTestExportForce(t *testing.T) {
println("TEST POOL ExportForce( ", TSTPoolName, " ) ... ")
p, err := zfs.PoolOpen(TSTPoolName)
if err != nil {
t.Error(err)
return
}
p.ExportForce("Test force exporting pool")
defer p.Close()
print("PASS\n\n")
}
func zpoolTestImport(t *testing.T) {
println("TEST POOL Import( ", TSTPoolName, " ) ... ")
p, err := zfs.PoolImport(TSTPoolName, []string{"/tmp"})
if err != nil {
t.Error(err)
return
}
defer p.Close()
print("PASS\n\n")
}
func zpoolTestImportByGUID(t *testing.T) {
println("TEST POOL ImportByGUID( ", TSTPoolGUID, " ) ... ")
p, err := zfs.PoolImportByGUID(TSTPoolGUID, []string{"/tmp"})
if err != nil {
t.Error(err)
return
}
defer p.Close()
print("PASS\n\n")
}
func printVDevTree(vt zfs.VDevTree, pref string) {
first := pref + vt.Name
fmt.Printf("%-30s | %-10s | %-10s | %s\n", first, vt.Type,
vt.Stat.State.String(), vt.Path)
for _, v := range vt.Devices {
printVDevTree(v, " "+pref)
}
if len(vt.Spares) > 0 {
fmt.Println("spares:")
for _, v := range vt.Spares {
printVDevTree(v, " "+pref)
}
}
if len(vt.L2Cache) > 0 {
fmt.Println("l2cache:")
for _, v := range vt.L2Cache {
printVDevTree(v, " "+pref)
}
}
}
func zpoolTestPoolImportSearch(t *testing.T) {
println("TEST PoolImportSearch")
pools, err := zfs.PoolImportSearch([]string{"/tmp"})
if err != nil {
t.Error(err.Error())
return
}
for _, p := range pools {
println()
println("---------------------------------------------------------------")
println("pool: ", p.Name)
println("guid: ", p.GUID)
println("state: ", p.State.String())
fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH")
println("---------------------------------------------------------------")
printVDevTree(p.VDevs, "")
}
print("PASS\n\n")
}
func zpoolTestPoolProp(t *testing.T) {
println("TEST PoolProp on ", TSTPoolName, " ... ")
if pool, err := zfs.PoolOpen(TSTPoolName); err == nil {
defer pool.Close()
// Turn on snapshot listing for pool
pool.SetProperty(PoolPropListsnaps, "on")
pool.SetProperty(zfs.PoolPropListsnaps, "off")
// Verify change is successful
if pool.Properties[zfs.PoolPropListsnaps].Value != "off" {
t.Error(fmt.Errorf("Update of pool property failed"))
return
}
// Test fetching property
propHealth, err := pool.GetProperty(zfs.PoolPropHealth)
if err != nil {
t.Error(err)
return
}
println("Pool property health: ", propHealth.Value)
propGUID, err := pool.GetProperty(zfs.PoolPropGUID)
if err != nil {
t.Error(err)
return
}
println("Pool property GUID: ", propGUID.Value)
// this test pool should not be bootable
prop, err := pool.GetProperty(zfs.PoolPropBootfs)
if err != nil {
t.Error(err)
return
}
if prop.Value != "-" {
t.Errorf("Failed at bootable fs property evaluation")
return
}
// fetch all properties
if err = pool.ReloadProperties(); err != nil {
t.Error(err)
return
}
} else {
t.Error(err)
return
}
print("PASS\n\n")
}
func zpoolTestPoolStatusAndState(t *testing.T) {
println("TEST pool Status/State ( ", TSTPoolName, " ) ... ")
pool, err := zfs.PoolOpen(TSTPoolName)
if err != nil {
t.Error(err.Error())
return
}
defer pool.Close()
if _, err = pool.Status(); err != nil {
t.Error(err.Error())
return
}
var pstate zfs.PoolState
if pstate, err = pool.State(); err != nil {
t.Error(err.Error())
return
}
println("POOL", TSTPoolName, "state:", zfs.PoolStateToName(pstate))
print("PASS\n\n")
}
func zpoolTestPoolVDevTree(t *testing.T) {
var vdevs zfs.VDevTree
println("TEST pool VDevTree ( ", TSTPoolName, " ) ... ")
pool, err := zfs.PoolOpen(TSTPoolName)
if err != nil {
t.Error(err.Error())
return
}
defer pool.Close()
vdevs, err = pool.VDevTree()
if err != nil {
t.Error(err.Error())
return
}
fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH")
println("---------------------------------------------------------------")
printVDevTree(vdevs, "")
print("PASS\n\n")
}
/* ------------------------------------------------------------------------- */
// EXAMPLES:
func ExamplePoolProp() {
if pool, err := zfs.PoolOpen("SSD"); err == nil {
print("Pool size is: ", pool.Properties[zfs.PoolPropSize].Value)
// Turn on snapshot listing for pool
pool.SetProperty(zfs.PoolPropListsnaps, "on")
println("Changed property",
zfs.PoolPropertyToName(zfs.PoolPropListsnaps), "to value:",
pool.Properties[zfs.PoolPropListsnaps].Value)
prop, err := pool.GetProperty(zfs.PoolPropHealth)
if err != nil {
panic(err)
}
println("Update and print out pool health:", prop.Value)
} else {
print("Error: ", err)
}
@ -268,7 +388,7 @@ func ExamplePoolProp() {
// Open and list all pools on system with them properties
func ExamplePoolOpenAll() {
// Lets open handles to all active pools on system
pools, err := PoolOpenAll()
pools, err := zfs.PoolOpenAll()
if err != nil {
println(err)
}
@ -277,18 +397,19 @@ func ExamplePoolOpenAll() {
for _, p := range pools {
// Print fancy header
fmt.Printf("\n -----------------------------------------------------------\n")
fmt.Printf(" POOL: %49s \n", p.Properties[PoolPropName].Value)
fmt.Printf(" POOL: %49s \n", p.Properties[zfs.PoolPropName].Value)
fmt.Printf("|-----------------------------------------------------------|\n")
fmt.Printf("| PROPERTY | VALUE | SOURCE |\n")
fmt.Printf("|-----------------------------------------------------------|\n")
// Iterate pool properties and print name, value and source
for key, prop := range p.Properties {
pkey := PoolProp(key)
if pkey == PoolPropName {
pkey := zfs.Prop(key)
if pkey == zfs.PoolPropName {
continue // Skip name its already printed above
}
fmt.Printf("|%14s | %20s | %15s |\n", p.PropertyToName(pkey),
fmt.Printf("|%14s | %20s | %15s |\n",
zfs.PoolPropertyToName(pkey),
prop.Value, prop.Source)
println("")
}
@ -302,33 +423,36 @@ func ExamplePoolOpenAll() {
func ExamplePoolCreate() {
disks := [2]string{"/dev/disk/by-id/ATA-123", "/dev/disk/by-id/ATA-456"}
var vdevs, mdevs, sdevs []VDevSpec
var vdev zfs.VDevTree
var vdevs, mdevs, sdevs []zfs.VDevTree
// build mirror devices specs
for _, d := range disks {
mdevs = append(mdevs,
VDevSpec{Type: VDevTypeDisk, Path: d})
zfs.VDevTree{Type: zfs.VDevTypeDisk, Path: d})
}
// spare device specs
sdevs = []VDevSpec{
{Type: VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}}
sdevs = []zfs.VDevTree{
{Type: zfs.VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}}
// pool specs
vdevs = []VDevSpec{
VDevSpec{Type: VDevTypeMirror, Devices: mdevs},
VDevSpec{Type: VDevTypeSpare, Devices: sdevs},
vdevs = []zfs.VDevTree{
zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs},
}
vdev.Devices = vdevs
vdev.Spares = sdevs
// pool properties
props := make(map[PoolProp]string)
props := make(map[zfs.Prop]string)
// root dataset filesystem properties
fsprops := make(map[ZFSProp]string)
fsprops := make(map[zfs.Prop]string)
// pool features
features := make(map[string]string)
// Turn off auto mounting by ZFS
fsprops[ZFSPropMountpoint] = "none"
fsprops[zfs.DatasetPropMountpoint] = "none"
// Enable some features
features["async_destroy"] = "enabled"
@ -337,7 +461,7 @@ func ExamplePoolCreate() {
// Based on specs formed above create test pool as 2 disk mirror and
// one spare disk
pool, err := PoolCreate("TESTPOOL", vdevs, features, props, fsprops)
pool, err := zfs.PoolCreate("TESTPOOL", vdev, features, props, fsprops)
if err != nil {
println("Error: ", err.Error())
return
@ -349,7 +473,7 @@ func ExamplePool_Destroy() {
pname := "TESTPOOL"
// Need handle to pool at first place
p, err := PoolOpen(pname)
p, err := zfs.PoolOpen(pname)
if err != nil {
println("Error: ", err.Error())
return
@ -364,32 +488,76 @@ func ExamplePool_Destroy() {
}
}
// Example of creating ZFS volume
func ExampleDatasetCreate() {
// Create map to represent ZFS dataset properties. This is equivalent to
// list of properties you can get from ZFS CLI tool, and some more
// internally used by libzfs.
props := make(map[ZFSProp]Property)
// I choose to create (block) volume 1GiB in size. Size is just ZFS dataset
// property and this is done as map of strings. So, You have to either
// specify size as base 10 number in string, or use strconv package or
// similar to convert in to string (base 10) from numeric type.
strSize := "1073741824"
props[ZFSPropVolsize] = Property{Value: strSize}
// In addition I explicitly choose some more properties to be set.
props[ZFSPropVolblocksize] = Property{Value: "4096"}
props[ZFSPropReservation] = Property{Value: strSize}
// Lets create desired volume
d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props)
func ExamplePoolImport() {
p, err := zfs.PoolImport("TESTPOOL", []string{"/dev/disk/by-id"})
if err != nil {
println(err.Error())
return
panic(err)
}
p.Close()
}
func ExamplePool_Export() {
p, err := zfs.PoolOpen("TESTPOOL")
if err != nil {
panic(err)
}
defer p.Close()
if err = p.Export(false, "Example exporting pool"); err != nil {
panic(err)
}
}
func ExamplePool_ExportForce() {
p, err := zfs.PoolOpen("TESTPOOL")
if err != nil {
panic(err)
}
defer p.Close()
if err = p.ExportForce("Example exporting pool"); err != nil {
panic(err)
}
}
func ExamplePool_State() {
p, err := zfs.PoolOpen("TESTPOOL")
if err != nil {
panic(err)
}
defer p.Close()
pstate, err := p.State()
if err != nil {
panic(err)
}
println("POOL TESTPOOL state:", zfs.PoolStateToName(pstate))
}
func TestPool_VDevTree(t *testing.T) {
type fields struct {
poolName string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
// TODO: Add test cases.
{
name: "test1",
fields: fields{"NETSTOR"},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pool, _ := zfs.PoolOpen(tt.fields.poolName)
defer pool.Close()
gotVdevs, err := pool.VDevTree()
if (err != nil) != tt.wantErr {
t.Errorf("Pool.VDevTree() error = %v, wantErr %v", err, tt.wantErr)
return
}
jsonData, _ := json.MarshalIndent(gotVdevs, "", "\t")
t.Logf("gotVdevs: %s", string(jsonData))
})
}
// Dataset have to be closed for memory cleanup
defer d.Close()
println("Created zfs volume TESTPOOL/VOLUME1")
}

37
zpool_vdev.c Normal file
View File

@ -0,0 +1,37 @@
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include <sys/fs/zfs.h>
#include "common.h"
#include "zpool.h"
__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags) {
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
zpool_vdev_online(pool->zph, path, flags, &newstate);
return newstate;
}
int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force) {
int ret = 0;
// if (force) {
// uint64_t guid = zpool_vdev_path_to_guid(pool->zph, path);
// vdev_aux_t aux;
// if (istmp == B_FALSE) {
// /* Force the fault to persist across imports */
// aux = VDEV_AUX_EXTERNAL_PERSIST;
// } else {
// aux = VDEV_AUX_EXTERNAL;
// }
// if (guid == 0 || zpool_vdev_fault(pool->zph, guid, aux) != 0)
// ret = 1;
// } else {
if (zpool_vdev_offline(pool->zph, path, istmp) != 0)
ret = 1;
// }
return ret;
}

143
zpool_vdev.go Normal file
View File

@ -0,0 +1,143 @@
package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
"fmt"
"unsafe"
)
// Online tries to set the given device(s) online.
// expand - also expand the device to use all available space
func (pool *Pool) Online(expand bool, devs ...string) (err error) {
cflags := C.int(0)
if expand {
cflags = C.ZFS_ONLINE_EXPAND
}
for _, dev := range devs {
csdev := C.CString(dev)
var newstate VDevState
if newstate = VDevState(C.set_zpool_vdev_online(pool.list, csdev, cflags)); newstate != VDevStateUnknown {
if newstate != VDevStateHealthy {
err = fmt.Errorf(
"Device '%s' onlined, but remains in faulted state",
dev)
}
} else {
err = LastError()
}
C.free(unsafe.Pointer(csdev))
}
return
}
// Offline takes the given device(s) offline.
func (pool *Pool) Offline(force bool, devs ...string) (err error) {
return pool.offline(false, force, devs...)
}
// OfflineTemp takes the given device(s) offline temporarily;
// upon reboot, the specified physical device reverts to its previous state.
// force - Force the device into a faulted state.
func (pool *Pool) OfflineTemp(force bool, devs ...string) (err error) {
return pool.offline(true, force, devs...)
}
// temp - Upon reboot, the specified physical device reverts to its previous state.
// force - Force the device into a faulted state.
func (pool *Pool) offline(temp, force bool, devs ...string) (err error) {
for _, dev := range devs {
csdev := C.CString(dev)
var newstate VDevState
if newstate = VDevState(C.set_zpool_vdev_offline(pool.list, csdev, booleanT(temp), booleanT(force))); newstate != VDevStateUnknown {
if newstate != VDevStateHealthy {
err = fmt.Errorf(
"Device '%s' offlined, but remains in faulted state",
dev)
}
} else {
err = LastError()
}
C.free(unsafe.Pointer(csdev))
}
return
}
// Clear - Clear all errors associated with a pool or a particular device.
func (pool *Pool) Clear(device string) (err error) {
csdev := C.CString(device)
if len(device) == 0 {
csdev = nil
}
if sc := C.do_zpool_clear(pool.list, csdev, C.ZPOOL_NO_REWIND); sc != 0 {
err = fmt.Errorf("Pool clear failed")
}
return
}
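A short usage sketch for the three helpers above; the pool name and device path are placeholders, and error handling is reduced to printing:

package main

import zfs "github.com/bicomsystems/go-libzfs"

func main() {
	pool, err := zfs.PoolOpen("TESTPOOL") // placeholder pool name
	if err != nil {
		panic(err)
	}
	defer pool.Close()
	dev := "/dev/disk/by-id/ATA-123" // placeholder device path
	// Take the device offline (non-temporary variant, force=false), then bring it back.
	if err = pool.Offline(false, dev); err != nil {
		println("offline:", err.Error())
	}
	if err = pool.Online(false, dev); err != nil {
		println("online:", err.Error())
	}
	// An empty device string clears errors for the whole pool.
	if err = pool.Clear(""); err != nil {
		println("clear:", err.Error())
	}
}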
// Attach test
// func (pool *Pool) attach(props PoolProperties, devs ...string) (err error) {
// cprops := toCPoolProperties(props)
// if cprops != nil {
// defer C.nvlist_free(cprops)
// } else {
// return fmt.Errorf("Out of memory [Pool Attach properties]")
// }
// cdevs := C.alloc_cstrings(C.int(len(devs)))
// if cdevs != nil {
// defer C.free(unsafe.Pointer(cdevs))
// } else {
// return fmt.Errorf("Out of memory [Pool Attach args]")
// }
// for i, dp := range devs {
// tmp := C.CString(dp)
// if tmp != nil {
// defer C.free(unsafe.Pointer(tmp))
// } else {
// return fmt.Errorf("Out of memory [Pool Attach dev]")
// }
// C.strings_setat(cdevs, C.int(i), tmp)
// }
// // vroot := C.make_root_vdev(pool.list.zph, cprops, 0, 0, 0, 0, len(devs), cdevs)
// var nvroot *C.struct_nvlist
// if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 {
// err = errors.New("Failed to allocate root vdev")
// return
// }
// csTypeRoot := C.CString(string(VDevTypeRoot))
// r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE,
// csTypeRoot)
// C.free(unsafe.Pointer(csTypeRoot))
// if r != 0 {
// err = errors.New("Failed to allocate root vdev")
// return
// }
// defer C.nvlist_free(nvroot)
// // Now we need to build specs (vdev hierarchy)
// if err = buildVDevTree(nvroot, VDevTypeRoot, vdev.Devices, vdev.Spares, vdev.L2Cache, props); err != nil {
// return
// }
// return
// }
// func (pool *Pool) AttachForce(devs ...string) (err error) {
// return
// }
// func (pool *Pool) Detach(devs ...string) (err error) {
// return
// }
// func (pool *Pool) DetachForce(devs ...string) (err error) {
// return
// }
// func (pool *Pool) Replace(devs ...string) (err error) {
// return
// }