- Starting from here, already useful initial implementation :-)

Faruk Kasumovic 2015-04-19 23:25:52 +02:00
commit cf7673fe41
10 changed files with 2235 additions and 0 deletions

12
LICENSE Normal file

@ -0,0 +1,12 @@
Copyright (c) 2015, Faruk Kasumovic
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

60
README.md Normal file

@ -0,0 +1,60 @@
# Introduction
_go-libzfs_ currently implements basic manipulation of ZFS pools and datasets; the plan is to add more functionality in further development, improve the documentation with more examples, and add more tests. The package binds the libzfs C library directly and does not wrap the OpenZFS CLI tools, which gives the best performance and, in my opinion, is the more reliable approach, since libzfs is less subject to change than the CLI tools. The goal is to make it easy to use and manipulate OpenZFS from within Go, mapping the libzfs C library into a Go-style package that respects common golang practice.
[![GoDoc](https://godoc.org/github.com/fkasumovic/go-libzfs?status.svg)](https://godoc.org/github.com/fkasumovic/go-libzfs)
# Requirements:
- OpenZFS and libzfs with development headers installed.
- Developed using go1.4.2
# Installing
```sh
go get github.com/fkasumovic/go-libzfs
```
# Testing
```sh
# On command line shell run
cd $GOPATH/src/github.com/fkasumovic/go-libzfs
go test
```
# Usage example
```go
// Create a map to represent ZFS dataset properties. This is equivalent to
// the list of properties you can get from the ZFS CLI tool, plus some more
// used internally by libzfs.
props := make(map[ZFSProp]Property)
// Here we create a (block) volume 1GiB in size. Size is just another ZFS
// dataset property, and properties are passed as strings, so you have to
// either specify the size as a base-10 number in a string, or use the
// strconv package (or similar) to convert from a numeric type.
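// (For example, strconv.FormatInt(1<<30, 10) produces the same string "1073741824".)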
strSize := "1073741824"
props[ZFSPropVolsize] = Property{Value: strSize}
// In addition, a few more properties are explicitly set.
props[ZFSPropVolblocksize] = Property{Value: "4096"}
props[ZFSPropReservation] = Property{Value: strSize}
// Let's create the desired volume
d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props)
if err != nil {
println(err.Error())
return
}
// The dataset has to be closed for memory cleanup
defer d.Close()
println("Created zfs volume TESTPOOL/VOLUME1")
```
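For the pool side of the API, here is a minimal sketch along the same lines (assuming an existing pool named TESTPOOL; it only uses `PoolOpen`, `Properties` and `PropertyToName` from zpool.go):
```go
// Open an existing pool by name; the handle must be closed when done.
pool, err := PoolOpen("TESTPOOL")
if err != nil {
	println(err.Error())
	return
}
defer pool.Close()
// Properties is loaded on open and indexed by PoolProp.
for prop, value := range pool.Properties {
	println(pool.PropertyToName(PoolProp(prop)), "=", value.Value, "<-", value.Source)
}
```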
## Special thanks to
- [Bicom Systems](http://www.bicomsystems.com) for supporting this little project and thereby making it possible.
- [OpenZFS](http://open-zfs.org) as the main ZFS software collective.

262
common.go Normal file

@ -0,0 +1,262 @@
// Package zfs implements basic manipulation of ZFS pools and datasets.
// It uses the libzfs C library instead of the CLI zfs tools, with the goal
// of making it easy to use and manipulate OpenZFS from within a Go project.
//
// TODO: Adding to the pool. (Add the given vdevs to the pool)
// TODO: Scan for pools.
//
//
package zfs
/*
#cgo CFLAGS: -I /usr/include/libzfs -I /usr/include/libspl -DHAVE_IOCTL_IN_SYS_IOCTL_H
#cgo LDFLAGS: -lzfs -lzpool -lnvpair
#include <stdlib.h>
#include <libzfs.h>
#include "zpool.h"
#include "zfs.h"
*/
import "C"
import (
"errors"
)
type VDevType string
var libzfs_handle *C.struct_libzfs_handle
func init() {
libzfs_handle = C.libzfs_init()
return
}
// Types of Virtual Devices
const (
VDevTypeRoot VDevType = "root"
VDevTypeMirror = "mirror"
VDevTypeReplacing = "replacing"
VDevTypeRaidz = "raidz"
VDevTypeDisk = "disk"
VDevTypeFile = "file"
VDevTypeMissing = "missing"
VDevTypeHole = "hole"
VDevTypeSpare = "spare"
VDevTypeLog = "log"
VDevTypeL2cache = "l2cache"
)
type PoolProp int
type ZFSProp int
type PoolStatus int
type PoolState uint64
// Zfs pool or dataset property
type Property struct {
Value string
Source string
}
// Pool status
const (
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
*/
PoolStatusCorrupt_cache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */
PoolStatusMissing_dev_r /* missing device with replicas */
PoolStatusMissing_dev_nr /* missing device with no replicas */
PoolStatusCorrupt_label_r /* bad device label with replicas */
PoolStatusCorrupt_label_nr /* bad device label with no replicas */
PoolStatusBad_guid_sum /* sum of device guids didn't match */
PoolStatusCorrupt_pool /* pool metadata is corrupted */
PoolStatusCorrupt_data /* data errors in user (meta)data */
PoolStatusFailing_dev /* device experiencing errors */
PoolStatusVersion_newer /* newer on-disk version */
PoolStatusHostid_mismatch /* last accessed by another system */
PoolStatusIo_failure_wait /* failed I/O, failmode 'wait' */
PoolStatusIo_failure_continue /* failed I/O, failmode 'continue' */
PoolStatusBad_log /* cannot read log chain(s) */
PoolStatusErrata /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
PoolStatusUnsup_feat_read /* unsupported features for read */
PoolStatusUnsup_feat_write /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
PoolStatusFaulted_dev_r /* faulted device with replicas */
PoolStatusFaulted_dev_nr /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
PoolStatusVersion_older /* older legacy on-disk version */
PoolStatusFeat_disabled /* supported features are disabled */
PoolStatusResilvering /* device being resilvered */
PoolStatusOffline_dev /* device offline */
PoolStatusRemoved_dev /* removed device */
/*
* Finally, the following indicates a healthy pool.
*/
PoolStatusOk
)
// Possible ZFS pool states
const (
PoolStateActive PoolState = iota /* In active use */
PoolStateExported /* Explicitly exported */
PoolStateDestroyed /* Explicitly destroyed */
PoolStateSpare /* Reserved for hot spare use */
PoolStateL2cache /* Level 2 ARC device */
PoolStateUninitialized /* Internal spa_t state */
PoolStateUnavail /* Internal libzfs state */
PoolStatePotentiallyActive /* Internal libzfs state */
)
// Pool properties. Enumerates available ZFS pool properties. Use it to access
// pool properties, either to read or to set a specific property.
const (
PoolPropName PoolProp = iota
PoolPropSize
PoolPropCapacity
PoolPropAltroot
PoolPropHealth
PoolPropGuid
PoolPropVersion
PoolPropBootfs
PoolPropDelegation
PoolPropAutoreplace
PoolPropCachefile
PoolPropFailuremode
PoolPropListsnaps
PoolPropAutoexpand
PoolPropDedupditto
PoolPropDedupratio
PoolPropFree
PoolPropAllocated
PoolPropReadonly
PoolPropAshift
PoolPropComment
PoolPropExpandsz
PoolPropFreeing
PoolNumProps
)
/*
* Dataset properties are identified by these constants and must be added to
* the end of this list to ensure that external consumers are not affected
* by the change. If you make any changes to this list, be sure to update
* the property table in module/zcommon/zfs_prop.c.
*/
const (
ZFSPropType ZFSProp = iota
ZFSPropCreation
ZFSPropUsed
ZFSPropAvailable
ZFSPropReferenced
ZFSPropCompressratio
ZFSPropMounted
ZFSPropOrigin
ZFSPropQuota
ZFSPropReservation
ZFSPropVolsize
ZFSPropVolblocksize
ZFSPropRecordsize
ZFSPropMountpoint
ZFSPropSharenfs
ZFSPropChecksum
ZFSPropCompression
ZFSPropAtime
ZFSPropDevices
ZFSPropExec
ZFSPropSetuid
ZFSPropReadonly
ZFSPropZoned
ZFSPropSnapdir
ZFSPropPrivate /* not exposed to user, temporary */
ZFSPropAclinherit
ZFSPropCreatetxg /* not exposed to the user */
ZFSPropName /* not exposed to the user */
ZFSPropCanmount
ZFSPropIscsioptions /* not exposed to the user */
ZFSPropXattr
ZFSPropNumclones /* not exposed to the user */
ZFSPropCopies
ZFSPropVersion
ZFSPropUtf8only
ZFSPropNormalize
ZFSPropCase
ZFSPropVscan
ZFSPropNbmand
ZFSPropSharesmb
ZFSPropRefquota
ZFSPropRefreservation
ZFSPropGuid
ZFSPropPrimarycache
ZFSPropSecondarycache
ZFSPropUsedsnap
ZFSPropUsedds
ZFSPropUsedchild
ZFSPropUsedrefreserv
ZFSPropUseraccounting /* not exposed to the user */
ZFSPropStmf_shareinfo /* not exposed to the user */
ZFSPropDefer_destroy
ZFSPropUserrefs
ZFSPropLogbias
ZFSPropUnique /* not exposed to the user */
ZFSPropObjsetid /* not exposed to the user */
ZFSPropDedup
ZFSPropMlslabel
ZFSPropSync
ZFSPropRefratio
ZFSPropWritten
ZFSPropClones
ZFSPropLogicalused
ZFSPropLogicalreferenced
ZFSPropInconsistent /* not exposed to the user */
ZFSPropSnapdev
ZFSPropAcltype
ZFSPropSelinux_context
ZFSPropSelinux_fscontext
ZFSPropSelinux_defcontext
ZFSPropSelinux_rootcontext
ZFSPropRelatime
ZFSPropRedundant_metadata
ZFSNumProps
)
// LastError returns the last underlying libzfs error description, if any.
func LastError() (err error) {
errno := C.libzfs_errno(libzfs_handle)
if errno == 0 {
return nil
}
return errors.New(C.GoString(C.libzfs_error_description(libzfs_handle)))
}
// ClearLastError returns and clears any last error set by the underlying libzfs.
func ClearLastError() (err error) {
err = LastError()
C.clear_last_error(libzfs_handle)
return
}
func boolean_t(b bool) (r C.boolean_t) {
if b {
return 1
}
return 0
}
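LastError and ClearLastError expose libzfs's handle-global error state; the higher-level calls in this package already wrap it into the errors they return. A minimal usage sketch (the pool name is hypothetical):
```go
// A failing libzfs call leaves its description in the library handle;
// LastError turns that into a Go error, and the package's high-level
// functions already do this for you.
if _, err := PoolOpen("NOSUCHPOOL"); err != nil {
	println("open failed:", err.Error())
}
// ClearLastError returns any pending libzfs error and resets it, which can
// be handy before starting a fresh sequence of calls.
_ = ClearLastError()
```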

96
zfs.c Normal file

@ -0,0 +1,96 @@
/* C wrappers around some libzfs calls, and C helpers in general, that simplify
* using libzfs from Go and make the Go code shorter and more readable.
*/
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "zpool.h"
#include "zfs.h"
dataset_list_t *create_dataset_list_item() {
dataset_list_t *zlist = malloc(sizeof(dataset_list_t));
memset(zlist, 0, sizeof(dataset_list_t));
return zlist;
}
void dataset_list_close(dataset_list_t *list) {
zfs_close(list->zh);
free(list);
}
int dataset_list_callb(zfs_handle_t *dataset, void *data) {
dataset_list_t **lroot = (dataset_list_t**)data;
dataset_list_t *nroot = create_dataset_list_item();
if ( !((*lroot)->zh) ) {
(*lroot)->zh = dataset;
} else {
nroot->zh = dataset;
nroot->pnext = (void*)*lroot;
*lroot = nroot;
}
return 0;
}
int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first) {
int err = 0;
dataset_list_t *zlist = create_dataset_list_item();
err = zfs_iter_root(libzfs, dataset_list_callb, &zlist);
if ( zlist->zh ) {
*first = zlist;
} else {
*first = 0;
free(zlist);
}
return err;
}
dataset_list_t *dataset_next(dataset_list_t *dataset) {
return dataset->pnext;
}
int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first) {
int err = 0;
dataset_list_t *zlist = create_dataset_list_item();
err = zfs_iter_children(zfs, dataset_list_callb, &zlist);
if ( zlist->zh ) {
*first = zlist;
} else {
*first = 0;
free(zlist);
}
return err;
}
int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) {
int r = 0;
zprop_source_t source;
char statbuf[INT_MAX_VALUE];
r = zfs_prop_get(zh, prop,
list->value, INT_MAX_VALUE, &source, statbuf, INT_MAX_VALUE, 1);
if (r == 0) {
// strcpy(list->name, zpool_prop_to_name(prop));
zprop_source_tostr(list->source, source);
}
list->property = (int)prop;
return r;
}
int clear_last_error(libzfs_handle_t *hdl) {
zfs_standard_error(hdl, EZFS_SUCCESS, "success");
return 0;
}
char** alloc_strings(int size) {
return malloc(size*sizeof(char*));
}
void strings_setat(char **a, int at, char *v) {
a[at] = v;
}

376
zfs.go Normal file

@ -0,0 +1,376 @@
package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
"errors"
)
const (
msgDatasetIsNil = "Dataset handle not initialized or it is closed"
)
type DatasetType int32
const (
DatasetTypeFilesystem DatasetType = (1 << 0)
DatasetTypeSnapshot = (1 << 1)
DatasetTypeVolume = (1 << 2)
DatasetTypePool = (1 << 3)
DatasetTypeBookmark = (1 << 4)
)
type Dataset struct {
list *C.dataset_list_t
Type DatasetType
Properties map[ZFSProp]Property
Children []Dataset
}
func (d *Dataset) openChildren() (err error) {
var dataset Dataset
d.Children = make([]Dataset, 0, 5)
errcode := C.dataset_list_children(d.list.zh, &(dataset.list))
for dataset.list != nil {
dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh))
dataset.Properties = make(map[ZFSProp]Property)
err = dataset.ReloadProperties()
if err != nil {
return
}
d.Children = append(d.Children, dataset)
dataset.list = C.dataset_next(dataset.list)
}
if errcode != 0 {
err = LastError()
return
}
for ci, _ := range d.Children {
if err = d.Children[ci].openChildren(); err != nil {
return
}
}
return
}
// DatasetOpenAll recursively gets handles to all available datasets on the
// system (file-systems, volumes or snapshots).
func DatasetOpenAll() (datasets []Dataset, err error) {
var dataset Dataset
errcode := C.dataset_list_root(libzfs_handle, &dataset.list)
for dataset.list != nil {
dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh))
err = dataset.ReloadProperties()
if err != nil {
return
}
datasets = append(datasets, dataset)
dataset.list = C.dataset_next(dataset.list)
}
if errcode != 0 {
err = LastError()
return
}
for ci, _ := range datasets {
if err = datasets[ci].openChildren(); err != nil {
return
}
}
return
}
// DatasetCloseAll closes all datasets in the slice and all of their recursive child datasets
func DatasetCloseAll(datasets []Dataset) {
for _, d := range datasets {
d.Close()
}
}
// DatasetOpen opens the dataset and all of its recursive child datasets
func DatasetOpen(path string) (d Dataset, err error) {
d.list = C.create_dataset_list_item()
d.list.zh = C.zfs_open(libzfs_handle, C.CString(path), 0xF)
if d.list.zh == nil {
err = LastError()
return
}
d.Type = DatasetType(C.zfs_get_type(d.list.zh))
d.Properties = make(map[ZFSProp]Property)
err = d.ReloadProperties()
if err != nil {
return
}
err = d.openChildren()
return
}
func datasetPropertiesTo_nvlist(props map[ZFSProp]Property) (
cprops *C.nvlist_t, err error) {
// convert properties to nvlist C type
r := C.nvlist_alloc(&cprops, C.NV_UNIQUE_NAME, 0)
if r != 0 {
err = errors.New("Failed to allocate properties")
return
}
for prop, value := range props {
r := C.nvlist_add_string(
cprops, C.zfs_prop_to_name(
C.zfs_prop_t(prop)), C.CString(value.Value))
if r != 0 {
err = errors.New("Failed to convert property")
return
}
}
return
}
// Create a new filesystem or volume on path representing pool/dataset or pool/parent/dataset
func DatasetCreate(path string, dtype DatasetType,
props map[ZFSProp]Property) (d Dataset, err error) {
var cprops *C.nvlist_t
if cprops, err = datasetPropertiesTo_nvlist(props); err != nil {
return
}
defer C.nvlist_free(cprops)
errcode := C.zfs_create(libzfs_handle, C.CString(path),
C.zfs_type_t(dtype), cprops)
if errcode != 0 {
err = LastError()
}
return
}
// Close closes the dataset and all of its recursive child datasets (closes the handle and cleans up the dataset objects from memory)
func (d *Dataset) Close() {
if d.list != nil && d.list.zh != nil {
C.dataset_list_close(d.list)
}
for _, cd := range d.Children {
cd.Close()
}
}
func (d *Dataset) Destroy(Defer bool) (err error) {
if d.list != nil {
if ec := C.zfs_destroy(d.list.zh, boolean_t(Defer)); ec != 0 {
err = LastError()
}
} else {
err = errors.New(msgDatasetIsNil)
}
return
}
func (d *Dataset) Pool() (p Pool, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
p.list = C.create_zpool_list_item()
p.list.zph = C.zfs_get_pool_handle(d.list.zh)
if p.list.zph != nil {
err = p.ReloadProperties()
return
}
err = LastError()
return
}
func (d *Dataset) ReloadProperties() (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
var plist *C.property_list_t
plist = C.new_property_list()
defer C.free_properties(plist)
d.Properties = make(map[ZFSProp]Property)
for prop := ZFSPropType; prop < ZFSNumProps; prop++ {
errcode := C.read_dataset_property(d.list.zh, plist, C.int(prop))
if errcode != 0 {
continue
}
d.Properties[prop] = Property{Value: C.GoString(&(*plist).value[0]),
Source: C.GoString(&(*plist).source[0])}
}
return
}
// Reload and return a single specified property. This also reloads the
// requested property in the Properties map.
func (d *Dataset) GetProperty(p ZFSProp) (prop Property, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
var plist *C.property_list_t
plist = C.new_property_list()
defer C.free_properties(plist)
errcode := C.read_dataset_property(d.list.zh, plist, C.int(p))
if errcode != 0 {
err = LastError()
return
}
prop = Property{Value: C.GoString(&(*plist).value[0]),
Source: C.GoString(&(*plist).source[0])}
d.Properties[p] = prop
return
}
// Set ZFS dataset property to value. Not all properties can be set,
// some can be set only at creation time and some are read only.
// Always check the returned error and its description.
func (d *Dataset) SetProperty(p ZFSProp, value string) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
errcode := C.zfs_prop_set(d.list.zh, C.zfs_prop_to_name(
C.zfs_prop_t(p)), C.CString(value))
if errcode != 0 {
err = LastError()
}
return
}
// Clones the dataset. The target must be of the same type as
// the source.
func (d *Dataset) Clone(target string, props map[ZFSProp]Property) (rd Dataset, err error) {
var cprops *C.nvlist_t
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if cprops, err = datasetPropertiesTo_nvlist(props); err != nil {
return
}
defer C.nvlist_free(cprops)
if errc := C.zfs_clone(d.list.zh, C.CString(target), cprops); errc != 0 {
err = LastError()
return
}
rd, err = DatasetOpen(target)
return
}
// Create dataset snapshot
func DatasetSnapshot(path string, recur bool, props map[ZFSProp]Property) (rd Dataset, err error) {
var cprops *C.nvlist_t
if cprops, err = datasetPropertiesTo_nvlist(props); err != nil {
return
}
defer C.nvlist_free(cprops)
if errc := C.zfs_snapshot(libzfs_handle, C.CString(path), boolean_t(recur), cprops); errc != 0 {
err = LastError()
return
}
rd, err = DatasetOpen(path)
return
}
// Return zfs dataset path/name
func (d *Dataset) Path() (path string, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
name := C.zfs_get_name(d.list.zh)
path = C.GoString(name)
return
}
func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if errc := C.zfs_rollback(d.list.zh,
snap.list.zh, boolean_t(force)); errc != 0 {
err = LastError()
}
return
}
func (d *Dataset) Rename(newname string, recur,
force_umount bool) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if errc := C.zfs_rename(d.list.zh, C.CString(newname),
boolean_t(recur), boolean_t(force_umount)); errc != 0 {
err = LastError()
}
return
}
// Checks to see if the mount is active. If the filesystem is mounted, fills
// in 'where' with the current mountpoint, and returns true. Otherwise,
// returns false.
func (d *Dataset) IsMounted() (mounted bool, where string) {
var cw *C.char
if d.list == nil {
return false, ""
}
m := C.zfs_is_mounted(d.list.zh, &cw)
defer C.free_cstring(cw)
if m != 0 {
return true, C.GoString(cw)
}
return false, ""
}
// Mount the given filesystem.
func (d *Dataset) Mount(options string, flags int) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if ec := C.zfs_mount(d.list.zh, C.CString(options), C.int(flags)); ec != 0 {
err = LastError()
}
return
}
// Unmount the given filesystem.
func (d *Dataset) Unmount(flags int) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if ec := C.zfs_unmount(d.list.zh, nil, C.int(flags)); ec != 0 {
err = LastError()
}
return
}
// Unmount this filesystem and any children inheriting the mountpoint property.
func (d *Dataset) UnmountAll(flags int) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
}
if ec := C.zfs_unmountall(d.list.zh, C.int(flags)); ec != 0 {
err = LastError()
}
return
}
// PropertyToName converts a property to its name
// (returns the built-in string representation of the property name).
// This is optional; you can represent each property with any string
// name of your choice.
func (d *Dataset) PropertyToName(p ZFSProp) (name string) {
if p == ZFSNumProps {
return "numofprops"
}
prop := C.zfs_prop_t(p)
name = C.GoString(C.zfs_prop_to_name(prop))
return
}
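A short usage sketch of the snapshot and clone calls above (the dataset and snapshot names are hypothetical):
```go
// Snapshot an existing filesystem, then clone that snapshot into a new dataset.
props := make(map[ZFSProp]Property)
snap, err := DatasetSnapshot("TESTPOOL/DATASET@backup", false, props)
if err != nil {
	println(err.Error())
	return
}
defer snap.Close()
clone, err := snap.Clone("TESTPOOL/RESTORE", props)
if err != nil {
	println(err.Error())
	return
}
defer clone.Close()
```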

31
zfs.h Normal file

@ -0,0 +1,31 @@
/* C wrappers around some libzfs calls, and C helpers in general, that simplify
* using libzfs from Go and make the Go code shorter and more readable.
*/
#ifndef SERVERWARE_ZFS_H
#define SERVERWARE_ZFS_H
struct dataset_list {
zfs_handle_t *zh;
void *pnext;
};
typedef struct dataset_list dataset_list_t;
dataset_list_t *create_dataset_list_item();
void dataset_list_close(dataset_list_t *list);
int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first);
int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first);
dataset_list_t *dataset_next(dataset_list_t *dataset);
int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop);
int clear_last_error(libzfs_handle_t *libzfs);
char** alloc_strings(int size);
void strings_setat(char **a, int at, char *v);
#endif
/* SERVERWARE_ZFS_H */

383
zpool.c Normal file

@ -0,0 +1,383 @@
/* C wrappers around some libzfs calls, and C helpers in general, that simplify
* using libzfs from Go and make the Go code shorter and more readable.
*/
#include <libzfs.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "zpool.h"
static char _lasterr_[1024];
const char *lasterr(void) {
return _lasterr_;
}
zpool_list_t *create_zpool_list_item() {
zpool_list_t *zlist = malloc(sizeof(zpool_list_t));
memset(zlist, 0, sizeof(zpool_list_t));
return zlist;
}
int zpool_list_callb(zpool_handle_t *pool, void *data) {
zpool_list_t **lroot = (zpool_list_t**)data;
zpool_list_t *nroot = create_zpool_list_item();
if ( !((*lroot)->zph) ) {
(*lroot)->zph = pool;
} else {
nroot->zph = pool;
nroot->pnext = (void*)*lroot;
*lroot = nroot;
}
return 0;
}
int zpool_list(libzfs_handle_t *libzfs, zpool_list_t **first) {
int err = 0;
zpool_list_t *zlist = create_zpool_list_item();
err = zpool_iter(libzfs, zpool_list_callb, &zlist);
if ( zlist->zph ) {
*first = zlist;
} else {
*first = 0;
free(zlist);
}
return err;
}
zpool_list_t* zpool_list_open(libzfs_handle_t *libzfs, const char *name) {
zpool_list_t *zlist = create_zpool_list_item();
zlist->zph = zpool_open(libzfs, name);
if ( zlist->zph ) {
return zlist;
} else {
free(zlist);
}
return 0;
}
zpool_list_t *zpool_next(zpool_list_t *pool) {
return pool->pnext;
}
void zpool_list_close(zpool_list_t *pool) {
zpool_close(pool->zph);
free(pool);
}
property_list_t *new_property_list() {
property_list_t *r = malloc(sizeof(property_list_t));
memset(r, 0, sizeof(property_list_t));
return r;
}
void free_properties(property_list_t *root) {
if (root != 0) {
property_list_t *tmp = 0;
do {
tmp = root->pnext;
free(root);
root = tmp;
} while(tmp);
}
}
property_list_t *next_property(property_list_t *list) {
if (list != 0) {
return list->pnext;
}
return list;
}
void zprop_source_tostr(char *dst, zprop_source_t source) {
switch (source) {
case ZPROP_SRC_NONE:
strcpy(dst, "none");
break;
case ZPROP_SRC_TEMPORARY:
strcpy(dst, "temporary");
break;
case ZPROP_SRC_LOCAL:
strcpy(dst, "local");
break;
case ZPROP_SRC_INHERITED:
strcpy(dst, "inherited");
break;
case ZPROP_SRC_RECEIVED:
strcpy(dst, "received");
break;
default:
strcpy(dst, "default");
break;
}
}
int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop) {
int r = 0;
zprop_source_t source;
r = zpool_get_prop(zh, prop,
list->value, INT_MAX_VALUE, &source);
if (r == 0) {
// strcpy(list->name, zpool_prop_to_name(prop));
zprop_source_tostr(list->source, source);
}
list->property = (int)prop;
return r;
}
int read_append_zpool_property(zpool_handle_t *zh, property_list_t **proot,
zpool_prop_t prop) {
int r = 0;
property_list_t *newitem = NULL, *root = *proot;
newitem = new_property_list();
r = read_zpool_property(zh, newitem, prop);
// printf("p: %s %s %s\n", newitem->name, newitem->value, newitem->source);
newitem->pnext = root;
*proot = root = newitem;
if (r != 0) {
free_properties(root);
*proot = NULL;
}
return r;
}
property_list_t *read_zpool_properties(zpool_handle_t *zh) {
// read pool name as first property
property_list_t *root = NULL, *list = NULL;
int r = read_append_zpool_property(zh, &root, ZPOOL_PROP_NAME);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_SIZE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_CAPACITY);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ALTROOT);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_HEALTH);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_GUID);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_VERSION);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_BOOTFS);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DELEGATION);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_AUTOREPLACE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_CACHEFILE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FAILUREMODE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_LISTSNAPS);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_AUTOEXPAND);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DEDUPDITTO);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DEDUPRATIO);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FREE);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ALLOCATED);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_READONLY);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ASHIFT);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_COMMENT);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_EXPANDSZ);
if (r != 0) {
return 0;
}
r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FREEING);
if (r != 0) {
return 0;
}
list = new_property_list();
list->property = ZPOOL_NUM_PROPS;
sprintf(list->value, "%d", ZPOOL_NUM_PROPS);
list->pnext = root;
zprop_source_tostr(list->source, ZPROP_SRC_NONE);
root = list;
// printf("Finished properties reading.\n");
return root;
}
pool_state_t zpool_read_state(zpool_handle_t *zh) {
return zpool_get_state(zh);
}
const char *gettext(const char *txt) {
return txt;
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop) {
zpool_prop_t prop = ZPROP_INVAL;
zfs_prop_t fprop;
nvlist_t *proplist;
const char *normnm;
char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) snprintf(_lasterr_, 1024, "internal error: out of memory");
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL &&
!zpool_prop_feature(propname)) {
(void) snprintf(_lasterr_, 1024, "property '%s' is "
"not a valid pool property", propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
// if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) &&
// nvlist_exists(proplist, vname)) ||
// (prop == ZPOOL_PROP_VERSION &&
// prop_list_contains_feature(proplist))) {
// (void) fprintf(stderr, gettext("'feature@' and "
// "'version' properties cannot be specified "
// "together\n"));
// return (2);
// }
if (zpool_prop_feature(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
normnm = zfs_prop_to_name(fprop);
} else {
normnm = propname;
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) snprintf(_lasterr_, 1024, "property '%s' "
"specified multiple times", propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) snprintf(_lasterr_, 1024, "internal "
"error: out of memory\n");
return (1);
}
return (0);
}
nvlist_t** nvlist_alloc_array(int count) {
return malloc(count*sizeof(nvlist_t*));
}
void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item) {
a[i] = item;
}
void nvlist_free_array(nvlist_t **a) {
free(a);
}
void free_cstring(char *str) {
free(str);
}
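On the Go side, read_zpool_property and read_zpool_properties back Pool.GetProperty and Pool.ReloadProperties in zpool.go below; a minimal sketch of reading a single property (the pool name is hypothetical):
```go
pool, err := PoolOpen("TESTPOOL")
if err != nil {
	println(err.Error())
	return
}
defer pool.Close()
// GetProperty re-reads one property and refreshes the matching entry
// in pool.Properties.
if prop, err := pool.GetProperty(PoolPropFree); err == nil {
	println("free:", prop.Value, "source:", prop.Source)
}
```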

562
zpool.go Normal file

@ -0,0 +1,562 @@
package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "zpool.h"
// #include "zfs.h"
import "C"
import (
"errors"
"fmt"
"strconv"
)
const (
msgPoolIsNil = "Pool handle not initialized or it is closed"
)
type PoolProperties map[PoolProp]string
type ZFSProperties map[ZFSProp]string
// Pool is an object representing a handle to a single ZFS pool.
//
// Pool.Properties holds all ZFS pool properties, indexed by PoolProp.
// Changing any of its entries will not affect the ZFS pool itself; for that,
// use the pool's SetProperty(prop, value) method. The properties are loaded
// whenever you open or create a pool, to give easy access to listing all
// available properties, and can be refreshed with up-to-date values by
// calling (*Pool) ReloadProperties.
type Pool struct {
list *C.zpool_list_t
Properties []Property
Features map[string]string
}
// PoolOpen opens a ZFS pool handle by name.
// It returns a Pool object; Pool.Close() has to be called explicitly
// for memory cleanup once the object is no longer needed.
func PoolOpen(name string) (pool Pool, err error) {
pool.list = C.zpool_list_open(libzfs_handle, C.CString(name))
if pool.list != nil {
err = pool.ReloadProperties()
return
}
err = LastError()
return
}
// Given a list of directories to search, find and import pool with matching
// name stored on disk.
func PoolImport(name string, searchpaths []string) (pool Pool, err error) {
errPoolList := errors.New("Failed to list pools")
var elem *C.nvpair_t
var config *C.nvlist_t
numofp := len(searchpaths)
cpaths := C.alloc_strings(C.int(numofp))
for i, path := range searchpaths {
C.strings_setat(cpaths, C.int(i), C.CString(path))
}
pools := C.zpool_find_import(libzfs_handle, C.int(numofp), cpaths)
defer C.nvlist_free(pools)
elem = C.nvlist_next_nvpair(pools, elem)
for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) {
var cname *C.char
var tconfig *C.nvlist_t
retcode := C.nvpair_value_nvlist(elem, &tconfig)
if retcode != 0 {
err = errPoolList
return
}
retcode = C.nvlist_lookup_string(tconfig,
C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cname)
if retcode != 0 {
err = errPoolList
return
}
oname := C.GoString(cname)
if name == oname {
config = tconfig
break
}
}
if config == nil {
err = errors.New("No pools to import found with name " + name)
return
}
retcode := C.zpool_import(libzfs_handle, config, C.CString(name), nil)
if retcode != 0 {
err = LastError()
return
}
pool, err = PoolOpen(name)
return
}
// PoolOpenAll opens all active ZFS pools on the current system.
// It returns a slice of Pool handles; each has to be closed when no longer
// needed by calling its Pool.Close() method.
func PoolOpenAll() (pools []Pool, err error) {
var pool Pool
errcode := C.zpool_list(libzfs_handle, &pool.list)
for pool.list != nil {
err = pool.ReloadProperties()
if err != nil {
return
}
pools = append(pools, pool)
pool.list = C.zpool_next(pool.list)
}
if errcode != 0 {
err = LastError()
}
return
}
func PoolCloseAll(pools []Pool) {
for _, p := range pools {
p.Close()
}
}
// PropertyToName converts a property to its name
// (returns the built-in string representation of the property name).
// This is optional; you can represent each property with any string
// name of your choice.
func (pool *Pool) PropertyToName(p PoolProp) (name string) {
if p == PoolNumProps {
return "numofprops"
}
prop := C.zpool_prop_t(p)
name = C.GoString(C.zpool_prop_to_name(prop))
return
}
// Re-read ZFS pool properties and features, refresh Pool.Properties and
// Pool.Features map
func (pool *Pool) ReloadProperties() (err error) {
propList := C.read_zpool_properties(pool.list.zph)
if propList == nil {
err = LastError()
return
}
pool.Properties = make([]Property, PoolNumProps+1)
next := propList
for next != nil {
pool.Properties[next.property] = Property{Value: C.GoString(&(next.value[0])), Source: C.GoString(&(next.source[0]))}
next = C.next_property(next)
}
C.free_properties(propList)
// read features
pool.Features = map[string]string{
"async_destroy": "disabled",
"empty_bpobj": "disabled",
"lz4_compress": "disabled"}
for name, _ := range pool.Features {
pool.GetFeature(name)
}
return
}
// Reload and return a single specified property. This also reloads the
// requested property in the Properties map.
func (pool *Pool) GetProperty(p PoolProp) (prop Property, err error) {
if pool.list != nil {
// First check if the property exists at all
if p < PoolPropName || p > PoolNumProps {
err = errors.New(fmt.Sprint("Unknown zpool property: ",
pool.PropertyToName(p)))
return
}
var list C.property_list_t
r := C.read_zpool_property(pool.list.zph, &list, C.int(p))
if r != 0 {
err = LastError()
}
prop.Value = C.GoString(&(list.value[0]))
prop.Source = C.GoString(&(list.source[0]))
pool.Properties[p] = prop
return
}
return prop, errors.New(msgPoolIsNil)
}
// Reload and return single specified feature. This also reloads requested
// feature in Features map.
func (pool *Pool) GetFeature(name string) (value string, err error) {
var fvalue [512]C.char
sname := fmt.Sprint("feature@", name)
r := C.zpool_prop_get_feature(pool.list.zph, C.CString(sname), &(fvalue[0]), 512)
if r != 0 {
err = errors.New(fmt.Sprint("Unknown zpool feature: ", name))
return
}
value = C.GoString(&(fvalue[0]))
pool.Features[name] = value
return
}
// Set ZFS pool property to value. Not all properties can be set,
// some can be set only at creation time and some are read only.
// Always check the returned error and its description.
func (pool *Pool) SetProperty(p PoolProp, value string) (err error) {
if pool.list != nil {
// First check if the property exists at all
if p < PoolPropName || p > PoolNumProps {
err = errors.New(fmt.Sprint("Unknown zpool property: ",
pool.PropertyToName(p)))
return
}
r := C.zpool_set_prop(pool.list.zph, C.CString(pool.PropertyToName(p)), C.CString(value))
if r != 0 {
err = LastError()
} else {
// Update Properties member with change made
_, err = pool.GetProperty(p)
}
return
}
return errors.New(msgPoolIsNil)
}
// Close closes the ZFS pool handle and releases the associated memory.
// Do not use the Pool object after this.
func (pool *Pool) Close() {
C.zpool_list_close(pool.list)
pool.list = nil
}
// Get (re-read) ZFS pool name property
func (pool *Pool) Name() (name string, err error) {
if pool.list == nil {
err = errors.New(msgPoolIsNil)
} else {
name = C.GoString(C.zpool_get_name(pool.list.zph))
pool.Properties[PoolPropName] = Property{Value: name, Source: "none"}
}
return
}
// Get ZFS pool state
// Return the state of the pool (ACTIVE or UNAVAILABLE)
func (pool *Pool) State() (state PoolState, err error) {
if pool.list == nil {
err = errors.New(msgPoolIsNil)
} else {
state = PoolState(C.zpool_read_state(pool.list.zph))
}
return
}
// ZFS virtual device specification
type VDevSpec struct {
Type VDevType
Devices []VDevSpec // groups other devices (e.g. mirror)
Parity uint
Path string
}
func (self *VDevSpec) isGrouping() (grouping bool, mindevs, maxdevs int) {
maxdevs = int(^uint(0) >> 1)
if self.Type == VDevTypeRaidz {
grouping = true
if self.Parity == 0 {
self.Parity = 1
}
if self.Parity > 254 {
self.Parity = 254
}
mindevs = int(self.Parity) + 1
maxdevs = 255
} else if self.Type == VDevTypeMirror {
grouping = true
mindevs = 2
} else if self.Type == VDevTypeLog || self.Type == VDevTypeSpare || self.Type == VDevTypeL2cache {
grouping = true
mindevs = 1
}
return
}
func (self *VDevSpec) isLog() (r C.uint64_t) {
r = 0
if self.Type == VDevTypeLog {
r = 1
}
return
}
func toCPoolProperties(props PoolProperties) (cprops *C.nvlist_t) {
cprops = nil
for prop, value := range props {
name := C.zpool_prop_to_name(C.zpool_prop_t(prop))
r := C.add_prop_list(name, C.CString(value), &cprops, C.boolean_t(1))
if r != 0 {
if cprops != nil {
C.nvlist_free(cprops)
cprops = nil
}
return
}
}
return
}
func toCZFSProperties(props ZFSProperties) (cprops *C.nvlist_t) {
cprops = nil
for prop, value := range props {
name := C.zfs_prop_to_name(C.zfs_prop_t(prop))
r := C.add_prop_list(name, C.CString(value), &cprops, C.boolean_t(0))
if r != 0 {
if cprops != nil {
C.nvlist_free(cprops)
cprops = nil
}
return
}
}
return
}
func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec,
props PoolProperties) (err error) {
count := len(vdevs)
if count == 0 {
return
}
childrens := C.nvlist_alloc_array(C.int(count))
if childrens == nil {
err = errors.New("No enough memory")
return
}
defer C.nvlist_free_array(childrens)
spares := C.nvlist_alloc_array(C.int(count))
if spares == nil {
err = errors.New("Not enough memory")
return
}
nspares := 0
defer C.nvlist_free_array(spares)
l2cache := C.nvlist_alloc_array(C.int(count))
if l2cache == nil {
err = errors.New("Not enough memory")
return
}
nl2cache := 0
defer C.nvlist_free_array(l2cache)
for i, vdev := range vdevs {
grouping, mindevs, maxdevs := vdev.isGrouping()
var child *C.nvlist_t = nil
// fmt.Println(vdev.Type)
if r := C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 {
err = errors.New("Failed to allocate vdev")
return
}
vcount := len(vdev.Devices)
if vcount < mindevs || vcount > maxdevs {
err = errors.New(fmt.Sprintf(
"Invalid vdev specification: %s requires at least %d and at most %d devices", vdev.Type, mindevs, maxdevs))
return
}
if r := C.nvlist_add_string(child, C.CString(C.ZPOOL_CONFIG_TYPE),
C.CString(string(vdev.Type))); r != 0 {
err = errors.New("Failed to set vdev type")
return
}
if r := C.nvlist_add_uint64(child, C.CString(C.ZPOOL_CONFIG_IS_LOG),
vdev.isLog()); r != 0 {
err = errors.New("Failed to allocate vdev (is_log)")
return
}
if grouping {
if vdev.Type == VDevTypeRaidz {
r := C.nvlist_add_uint64(child,
C.CString(C.ZPOOL_CONFIG_NPARITY),
C.uint64_t(mindevs-1))
if r != 0 {
err = errors.New("Failed to allocate vdev (parity)")
return
}
}
if err = buildVDevSpec(child, vdev.Type, vdev.Devices,
props); err != nil {
return
}
} else {
// if vdev.Type == VDevTypeDisk {
if r := C.nvlist_add_uint64(child,
C.CString(C.ZPOOL_CONFIG_WHOLE_DISK), 1); r != 0 {
err = errors.New("Failed to allocate vdev child (whdisk)")
return
}
// }
if len(vdev.Path) > 0 {
if r := C.nvlist_add_string(
child, C.CString(C.ZPOOL_CONFIG_PATH),
C.CString(vdev.Path)); r != 0 {
err = errors.New("Failed to allocate vdev child (type)")
return
}
ashift, _ := strconv.Atoi(props[PoolPropAshift])
if ashift > 0 {
if r := C.nvlist_add_uint64(child,
C.CString(C.ZPOOL_CONFIG_ASHIFT),
C.uint64_t(ashift)); r != 0 {
err = errors.New("Failed to allocate vdev child (ashift)")
return
}
}
}
if vdev.Type == VDevTypeSpare {
C.nvlist_array_set(spares, C.int(nspares), child)
nspares++
count--
continue
} else if vdev.Type == VDevTypeL2cache {
C.nvlist_array_set(l2cache, C.int(nl2cache), child)
nl2cache++
count--
continue
}
}
C.nvlist_array_set(childrens, C.int(i), child)
}
if count > 0 {
if r := C.nvlist_add_nvlist_array(root,
C.CString(C.ZPOOL_CONFIG_CHILDREN), childrens,
C.uint_t(count)); r != 0 {
err = errors.New("Failed to allocate vdev children")
return
}
// fmt.Println("childs", root, count, rtype)
// debug.PrintStack()
}
if nl2cache > 0 {
if r := C.nvlist_add_nvlist_array(root,
C.CString(C.ZPOOL_CONFIG_L2CACHE), l2cache,
C.uint_t(nl2cache)); r != 0 {
err = errors.New("Failed to allocate vdev cache")
return
}
}
if nspares > 0 {
if r := C.nvlist_add_nvlist_array(root,
C.CString(C.ZPOOL_CONFIG_SPARES), spares,
C.uint_t(nspares)); r != 0 {
err = errors.New("Failed to allocate vdev spare")
return
}
// fmt.Println("spares", root, count)
}
return
}
// Create ZFS pool per specs, features and properties of pool and root dataset
func PoolCreate(name string, vdevs []VDevSpec, features map[string]string,
props PoolProperties, fsprops ZFSProperties) (pool Pool, err error) {
// create root vdev nvroot
var nvroot *C.nvlist_t = nil
if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 {
err = errors.New("Failed to allocate root vdev")
return
}
if r := C.nvlist_add_string(nvroot, C.CString(C.ZPOOL_CONFIG_TYPE),
C.CString(string(VDevTypeRoot))); r != 0 {
err = errors.New("Failed to allocate root vdev")
return
}
defer C.nvlist_free(nvroot)
// Now we need to build specs (vdev hierarchy)
if err = buildVDevSpec(nvroot, VDevTypeRoot, vdevs, props); err != nil {
return
}
// convert properties
cprops := toCPoolProperties(props)
if cprops != nil {
defer C.nvlist_free(cprops)
} else if len(props) > 0 {
err = errors.New("Failed to allocate pool properties")
return
}
cfsprops := toCZFSProperties(fsprops)
if cfsprops != nil {
defer C.nvlist_free(cfsprops)
} else if len(fsprops) > 0 {
err = errors.New("Failed to allocate FS properties")
return
}
for fname, fval := range features {
sfname := fmt.Sprintf("feature@%s", fname)
r := C.add_prop_list(C.CString(sfname), C.CString(fval), &cprops,
C.boolean_t(1))
if r != 0 {
if cprops != nil {
C.nvlist_free(cprops)
cprops = nil
}
err = errors.New("Failed to add pool feature " + fname)
return
}
}
// Create actual pool then open
if r := C.zpool_create(libzfs_handle, C.CString(name), nvroot,
cprops, cfsprops); r != 0 {
err = LastError()
return
}
pool, err = PoolOpen(name)
return
}
// Status gets the pool status, letting you check whether the pool is healthy.
func (pool *Pool) Status() (status PoolStatus, err error) {
var msgid *C.char
var reason C.zpool_status_t
var errata C.zpool_errata_t
if pool.list == nil {
err = errors.New(msgPoolIsNil)
return
}
reason = C.zpool_get_status(pool.list.zph, &msgid, &errata)
status = PoolStatus(reason)
return
}
// Destroy the pool. It is up to the caller to ensure that there are no
// datasets left in the pool. logStr is optional; if specified, it is
// appended to the ZFS history.
func (pool *Pool) Destroy(logStr string) (err error) {
if pool.list == nil {
err = errors.New(msgPoolIsNil)
return
}
retcode := C.zpool_destroy(pool.list.zph, C.CString(logStr))
if retcode != 0 {
err = LastError()
}
return
}
// Exports the pool from the system.
// Before exporting the pool, all datasets within the pool are unmounted.
// A pool can not be exported if it has a shared spare that is currently
// being used.
func (pool *Pool) Export(force bool) (err error) {
var force_t C.boolean_t = 0
if force {
force_t = 1
}
retcode := C.zpool_export(pool.list.zph, force_t, nil)
if retcode != 0 {
err = LastError()
}
return
}
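A brief sketch combining Export and PoolImport from above (the pool name and search path follow the examples in zpool_test.go and are hypothetical):
```go
// Export an open pool, then re-import it by scanning a search directory.
pool, err := PoolOpen("TESTPOOL")
if err != nil {
	println(err.Error())
	return
}
if err = pool.Export(false); err != nil {
	println(err.Error())
}
pool.Close()
// PoolImport looks for a pool with the given name in the search paths and
// imports it, returning a freshly opened handle on success.
imported, err := PoolImport("TESTPOOL", []string{"/dev/disk/by-id"})
if err != nil {
	println(err.Error())
	return
}
defer imported.Close()
```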

58
zpool.h Normal file

@ -0,0 +1,58 @@
/* C wrappers around some libzfs calls, and C helpers in general, that simplify
* using libzfs from Go and make the Go code shorter and more readable.
*/
#ifndef SERVERWARE_ZPOOL_H
#define SERVERWARE_ZPOOL_H
#define INT_MAX_NAME 256
#define INT_MAX_VALUE 1024
struct zpool_list {
zpool_handle_t *zph;
void *pnext;
};
typedef struct property_list {
char value[INT_MAX_VALUE];
char source[INT_MAX_NAME];
int property;
void *pnext;
} property_list_t;
typedef struct zpool_list zpool_list_t;
property_list_t *new_property_list();
zpool_list_t *create_zpool_list_item();
void zprop_source_tostr(char *dst, zprop_source_t source);
zpool_list_t* zpool_list_open(libzfs_handle_t *libzfs, const char *name);
int zpool_list(libzfs_handle_t *libzfs, zpool_list_t **first);
zpool_list_t *zpool_next(zpool_list_t *pool);
void zpool_list_close(zpool_list_t *pool);
int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop);
property_list_t *read_zpool_properties(zpool_handle_t *zh);
property_list_t *next_property(property_list_t *list);
void free_properties(property_list_t *root);
pool_state_t zpool_read_state(zpool_handle_t *zh);
const char *lasterr(void);
int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop);
nvlist_t** nvlist_alloc_array(int count);
void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item);
void nvlist_free_array(nvlist_t **a);
void free_cstring(char *str);
#endif
/* SERVERWARE_ZPOOL_H */

395
zpool_test.go Normal file

@ -0,0 +1,395 @@
package zfs
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"testing"
)
const (
TST_POOL_NAME = "TESTPOOL"
TST_DATASET_PATH = "TESTPOOL/DATASET"
)
func CreateTmpSparse(prefix string, size int64) (path string, err error) {
sf, err := ioutil.TempFile("/tmp", prefix)
if err != nil {
return
}
defer sf.Close()
if err = sf.Truncate(size); err != nil {
return
}
path = sf.Name()
return
}
// Create three sparse files, each 5G in size, in the /tmp directory, and use them to create the mirror TESTPOOL with one spare "disk"
func TestPoolCreate(t *testing.T) {
print("TEST PoolCreate ... ")
var s1path, s2path, s3path string
var err error
if s1path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil {
t.Error(err)
return
}
if s2path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil {
// try cleanup
os.Remove(s1path)
t.Error(err)
return
}
if s3path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil {
// try cleanup
os.Remove(s1path)
os.Remove(s2path)
t.Error(err)
return
}
disks := [2]string{s1path, s2path}
var vdevs, mdevs, sdevs []VDevSpec
for _, d := range disks {
mdevs = append(mdevs,
VDevSpec{Type: VDevTypeFile, Path: d})
}
sdevs = []VDevSpec{
{Type: VDevTypeFile, Path: s3path}}
vdevs = []VDevSpec{
VDevSpec{Type: VDevTypeMirror, Devices: mdevs},
VDevSpec{Type: VDevTypeSpare, Devices: sdevs},
}
props := make(map[PoolProp]string)
fsprops := make(map[ZFSProp]string)
features := make(map[string]string)
fsprops[ZFSPropMountpoint] = "none"
features["async_destroy"] = "enabled"
features["empty_bpobj"] = "enabled"
features["lz4_compress"] = "enabled"
pool, err := PoolCreate(TST_POOL_NAME, vdevs, features, props, fsprops)
if err != nil {
t.Error(err)
// try cleanup
os.Remove(s1path)
os.Remove(s2path)
os.Remove(s3path)
return
}
defer pool.Close()
// try cleanup
os.Remove(s1path)
os.Remove(s2path)
os.Remove(s3path)
println("PASS")
}
// Open and list all pools and their state on the system,
// then list the properties of the last pool in the list
func TestPoolOpenAll(t *testing.T) {
println("TEST PoolOpenAll() ... ")
var pname string
pools, err := PoolOpenAll()
if err != nil {
t.Error(err)
return
}
println("\tThere is ", len(pools), " ZFS pools.")
for _, p := range pools {
pname, err = p.Name()
if err != nil {
t.Error(err)
p.Close()
return
}
pstate, err := p.State()
if err != nil {
t.Error(err)
p.Close()
return
}
println("\tPool: ", pname, " state: ", pstate)
p.Close()
}
if len(pname) > 0 {
// test open on last pool
println("\tTry to open pool ", pname)
p, err := PoolOpen(pname)
if err != nil {
t.Error(err)
return
}
println("\tOpen pool: ", pname, " success")
println("\t", pname, " PROPERTIES:")
pc, _ := strconv.Atoi(p.Properties[PoolNumProps].Value)
if len(p.Properties) != (pc + 1) {
p.Close()
t.Error(fmt.Sprint("Number of zpool properties does not match ",
len(p.Properties), " != ", pc+1))
return
}
for key, value := range p.Properties {
pkey := PoolProp(key)
println("\t\t", p.PropertyToName(pkey), " = ", value.Value, " <- ", value.Source)
}
for key, value := range p.Features {
fmt.Printf("\t feature@%s = %s <- local\n", key, value)
}
if p.Properties[PoolPropListsnaps].Value == "off" {
println("\tlistsnapshots to on")
if err = p.SetProperty(PoolPropListsnaps, "on"); err != nil {
t.Error(err)
}
} else {
println("\tlistsnapshots to off")
if err = p.SetProperty(PoolPropListsnaps, "off"); err != nil {
t.Error(err)
}
}
if err == nil {
println("\tlistsnapshots", "is changed to ",
p.Properties[PoolPropListsnaps].Value, " <- ",
p.Properties[PoolPropListsnaps].Source)
}
p.Close()
}
println("PASS")
}
func TestDatasetCreate(t *testing.T) {
print("TEST DatasetCreate(", TST_DATASET_PATH, ") ... ")
props := make(map[ZFSProp]Property)
d, err := DatasetCreate(TST_DATASET_PATH, DatasetTypeFilesystem, props)
if err != nil {
t.Error(err)
return
}
d.Close()
println("PASS")
}
func TestDatasetOpen(t *testing.T) {
print("TEST DatasetOpen(", TST_DATASET_PATH, ") ... ")
d, err := DatasetOpen(TST_DATASET_PATH)
if err != nil {
t.Error(err)
return
}
d.Close()
println("PASS")
}
func printDatasets(ds []Dataset) error {
for _, d := range ds {
path, err := d.Path()
if err != nil {
return err
}
println("\t", path)
if len(d.Children) > 0 {
printDatasets(d.Children)
}
}
return nil
}
func TestDatasetOpenAll(t *testing.T) {
println("TEST DatasetOpenAll()/DatasetCloseAll() ... ")
ds, err := DatasetOpenAll()
if err != nil {
t.Error(err)
return
}
if err = printDatasets(ds); err != nil {
DatasetCloseAll(ds)
t.Error(err)
return
}
DatasetCloseAll(ds)
println("PASS")
}
func TestDatasetDestroy(t *testing.T) {
print("TEST DATASET Destroy()", TST_DATASET_PATH, " ... ")
d, err := DatasetOpen(TST_DATASET_PATH)
if err != nil {
t.Error(err)
return
}
defer d.Close()
if err = d.Destroy(false); err != nil {
t.Error(err)
return
}
println("PASS")
}
func TestPoolDestroy(t *testing.T) {
print("TEST POOL Destroy()", TST_POOL_NAME, " ... ")
p, err := PoolOpen(TST_POOL_NAME)
if err != nil {
t.Error(err)
return
}
defer p.Close()
if err = p.Destroy("Test of pool destroy (" + TST_POOL_NAME + ")"); err != nil {
t.Error(err.Error())
return
}
println("PASS")
}
func TestFailPoolOpen(t *testing.T) {
print("TEST failing to open pool ... ")
pname := "fail to open this pool"
p, err := PoolOpen(pname)
if err != nil {
println("PASS")
return
}
t.Error("PoolOpen pass when it should fail")
p.Close()
}
func ExamplePoolProp() {
if pool, err := PoolOpen("SSD"); err == nil {
print("Pool size is: ", pool.Properties[PoolPropSize].Value)
// Turn on snapshot listing for pool
pool.SetProperty(PoolPropListsnaps, "on")
} else {
print("Error: ", err)
}
}
// Open and list all pools on the system with their properties
func ExamplePoolOpenAll() {
// Let's open handles to all active pools on the system
pools, err := PoolOpenAll()
if err != nil {
println(err)
}
// Print each pool name and properties
for _, p := range pools {
// Print fancy header
fmt.Printf("\n -----------------------------------------------------------\n")
fmt.Printf(" POOL: %49s \n", p.Properties[PoolPropName].Value)
fmt.Printf("|-----------------------------------------------------------|\n")
fmt.Printf("| PROPERTY | VALUE | SOURCE |\n")
fmt.Printf("|-----------------------------------------------------------|\n")
// Iterate pool properties and print name, value and source
for key, prop := range p.Properties {
pkey := PoolProp(key)
if pkey == PoolPropName {
continue // Skip name, it's already printed above
}
fmt.Printf("|%14s | %20s | %15s |\n", p.PropertyToName(pkey),
prop.Value, prop.Source)
println("")
}
println("")
// Close pool handle and free memory, since it will not be used anymore
p.Close()
}
}
func ExamplePoolCreate() {
disks := [2]string{"/dev/disk/by-id/ATA-123", "/dev/disk/by-id/ATA-456"}
var vdevs, mdevs, sdevs []VDevSpec
// build mirror devices specs
for _, d := range disks {
mdevs = append(mdevs,
VDevSpec{Type: VDevTypeDisk, Path: d})
}
// spare device specs
sdevs = []VDevSpec{
{Type: VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}}
// pool specs
vdevs = []VDevSpec{
VDevSpec{Type: VDevTypeMirror, Devices: mdevs},
VDevSpec{Type: VDevTypeSpare, Devices: sdevs},
}
// pool properties
props := make(map[PoolProp]string)
// root dataset filesystem properties
fsprops := make(map[ZFSProp]string)
// pool features
features := make(map[string]string)
// Turn off auto mounting by ZFS
fsprops[ZFSPropMountpoint] = "none"
// Enable some features
features["async_destroy"] = "enabled"
features["empty_bpobj"] = "enabled"
features["lz4_compress"] = "enabled"
// Based on specs formed above create test pool as 2 disk mirror and
// one spare disk
pool, err := PoolCreate("TESTPOOL", vdevs, features, props, fsprops)
if err != nil {
println("Error: ", err.Error())
return
}
defer pool.Close()
}
func ExamplePool_Destroy() {
pname := "TESTPOOL"
// We need a handle to the pool first
p, err := PoolOpen(pname)
if err != nil {
println("Error: ", err.Error())
return
}
// Make sure pool handle is free after we are done here
defer p.Close()
if err = p.Destroy("Example of pool destroy (TESTPOOL)"); err != nil {
println("Error: ", err.Error())
return
}
}
// Example of creating ZFS volume
func ExampleDatasetCreate() {
// Create a map to represent ZFS dataset properties. This is equivalent to
// the list of properties you can get from the ZFS CLI tool, plus some more
// used internally by libzfs.
props := make(map[ZFSProp]Property)
// Here we create a (block) volume 1GiB in size. Size is just another ZFS
// dataset property, and properties are passed as strings, so you have to
// either specify the size as a base-10 number in a string, or use the
// strconv package (or similar) to convert from a numeric type.
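// (For example, strconv.FormatInt(1<<30, 10) produces the same string "1073741824".)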
strSize := "1073741824"
props[ZFSPropVolsize] = Property{Value: strSize}
// In addition, a few more properties are explicitly set.
props[ZFSPropVolblocksize] = Property{Value: "4096"}
props[ZFSPropReservation] = Property{Value: strSize}
// Let's create the desired volume
d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props)
if err != nil {
println(err.Error())
return
}
// The dataset has to be closed for memory cleanup
defer d.Close()
println("Created zfs volume TESTPOOL/VOLUME1")
}