Merge branch 'dev-zfs-0.7.x'

# Conflicts:
#	zpool.c
Faruk Kasumovic 2018-11-09 16:24:46 +01:00
commit d7d3b1e9b7
9 changed files with 320 additions and 89 deletions


@ -25,6 +25,7 @@ func Test(t *testing.T) {
zfsTestDatasetSnapshot(t)
zfsTestDatasetOpenAll(t)
zfsTestDatasetSetProperty(t)
zfsTestDatasetHoldRelease(t)
zfsTestDatasetDestroy(t)


@ -90,8 +90,11 @@ const (
PoolStatusFailingDev /* device experiencing errors */
PoolStatusVersionNewer /* newer on-disk version */
PoolStatusHostidMismatch /* last accessed by another system */
PoolStatusHosidActive /* currently active on another system */
PoolStatusHostidRequired /* multihost=on and hostid=0 */
PoolStatusIoFailureWait /* failed I/O, failmode 'wait' */
PoolStatusIoFailureContinue /* failed I/O, failmode 'continue' */
PoolStatusIOFailureMap /* failed MMP, failmode not 'panic' */
PoolStatusBadLog /* cannot read log chain(s) */
PoolStatusErrata /* informational errata available */
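The three new statuses cover the multihost (MMP) support added in ZFS 0.7.x. Below is a minimal caller-side sketch of reacting to them; the import path and the pool.Status() accessor are assumptions based on how the rest of this binding is used.

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

// checkMultihost reports whether the named pool is safe to use on this host.
func checkMultihost(name string) error {
	pool, err := zfs.PoolOpen(name)
	if err != nil {
		return err
	}
	defer pool.Close()
	status, err := pool.Status() // accessor assumed to exist in this binding
	if err != nil {
		return err
	}
	switch status {
	case zfs.PoolStatusHosidActive:
		return fmt.Errorf("%s is currently active on another system", name)
	case zfs.PoolStatusHostidRequired:
		return fmt.Errorf("%s has multihost=on but this host has hostid 0", name)
	case zfs.PoolStatusIOFailureMap:
		return fmt.Errorf("%s saw failed MMP writes", name)
	}
	fmt.Println(name, "status:", status.String())
	return nil
}

func main() {
	if err := checkMultihost("TESTPOOL"); err != nil { // pool name assumed
		fmt.Println(err)
	}
}
```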
@ -144,7 +147,9 @@ const (
// Pool properties. Enumerates available ZFS pool properties. Use it to access
// pool properties, either to read or to set a specific property.
const (
PoolPropName Prop = iota
PoolPropCont Prop = iota - 2
PoolPropInval
PoolPropName
PoolPropSize
PoolPropCapacity
PoolPropAltroot
@ -171,6 +176,8 @@ const (
PoolPropLeaked
PoolPropMaxBlockSize
PoolPropTName
PoolPropMaxNodeSize
PoolPropMultiHost
PoolNumProps
)
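The `iota - 2` trick gives the two new sentinels negative values while keeping PoolPropName at 0, so the Go constants stay aligned with the C-side property indices (ZPROP_CONT is -2, ZPROP_INVAL is -1). A quick check of the resulting values:

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	// Values follow directly from the const block above: the sentinels are
	// negative, real properties still start at zero, so the existing
	// property numbering is unchanged by the insertion.
	fmt.Println(zfs.PoolPropCont)  // -2
	fmt.Println(zfs.PoolPropInval) // -1
	fmt.Println(zfs.PoolPropName)  // 0
	fmt.Println(zfs.PoolPropSize)  // 1
}
```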
@ -181,7 +188,9 @@ const (
* the property table in module/zcommon/zfs_prop.c.
*/
const (
DatasetPropType Prop = iota
DatasetPropCont Prop = iota - 2
DatasetPropBad
DatasetPropType
DatasetPropCreation
DatasetPropUsed
DatasetPropAvailable
@ -207,7 +216,7 @@ const (
DatasetPropSnapdir
DatasetPropPrivate /* not exposed to user, temporary */
DatasetPropAclinherit
DatasetPropCreatetxg /* not exposed to the user */
DatasetPropCreateTXG /* not exposed to the user */
DatasetPropName /* not exposed to the user */
DatasetPropCanmount
DatasetPropIscsioptions /* not exposed to the user */
@ -240,12 +249,14 @@ const (
DatasetPropDedup
DatasetPropMlslabel
DatasetPropSync
DatasetPropDnodeSize
DatasetPropRefratio
DatasetPropWritten
DatasetPropClones
DatasetPropLogicalused
DatasetPropLogicalreferenced
DatasetPropInconsistent /* not exposed to the user */
DatasetPropVolmode
DatasetPropFilesystemLimit
DatasetPropSnapshotLimit
DatasetPropFilesystemCount
@ -259,6 +270,17 @@ const (
DatasetPropRelatime
DatasetPropRedundantMetadata
DatasetPropOverlay
DatasetPropPrevSnap
DatasetPropReceiveResumeToken
DatasetPropEncryption
DatasetPropKeyLocation
DatasetPropKeyFormat
DatasetPropPBKDF2Salt
DatasetPropPBKDF2Iters
DatasetPropEncryptionRoot
DatasetPropKeyGUID
DatasetPropKeyStatus
DatasetPropRemapTXG /* not exposed to the user */
DatasetNumProps
)
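The dataset property list gains the 0.7.x additions (dnodesize, volmode, the encryption family, the receive resume token). They are read through the same GetProperty path touched later in this commit; a hedged sketch, assuming the dataset exists:

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET") // dataset name assumed
	if err != nil {
		panic(err)
	}
	defer d.Close()
	// New properties are read exactly like the pre-existing ones.
	if p, err := d.GetProperty(zfs.DatasetPropEncryption); err == nil {
		fmt.Println("encryption:", p.Value)
	}
	if p, err := d.GetProperty(zfs.DatasetPropDnodeSize); err == nil {
		fmt.Println("dnodesize:", p.Value)
	}
	if p, err := d.GetProperty(zfs.DatasetPropReceiveResumeToken); err == nil {
		fmt.Println("receive_resume_token:", p.Value)
	}
}
```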


@ -5,13 +5,18 @@ package zfs
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
// #include <memory.h>
// #include <string.h>
import "C"
import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"unsafe"
)
@ -23,11 +28,11 @@ type SendFlags struct {
Dedup bool
Props bool
DryRun bool
// Parsable bool
// Progress bool
Parsable bool
Progress bool
LargeBlock bool
EmbedData bool
// Compress bool
Compress bool
}
type RecvFlags struct {
@ -58,11 +63,11 @@ func to_sendflags_t(flags *SendFlags) (cflags *C.sendflags_t) {
cflags.dedup = to_boolean_t(flags.Dedup)
cflags.props = to_boolean_t(flags.Props)
cflags.dryrun = to_boolean_t(flags.DryRun)
// cflags.parsable = to_boolean_t(flags.Parsable)
// cflags.progress = to_boolean_t(flags.Progress)
cflags.parsable = to_boolean_t(flags.Parsable)
cflags.progress = to_boolean_t(flags.Progress)
cflags.largeblock = to_boolean_t(flags.LargeBlock)
cflags.embed_data = to_boolean_t(flags.EmbedData)
// cflags.compress = to_boolean_t(flags.Compress)
cflags.compress = to_boolean_t(flags.Compress)
return
}
@ -96,15 +101,20 @@ func (d *Dataset) send(FromName string, outf *os.File, flags *SendFlags) (err er
if dpath, err = d.Path(); err != nil {
return
}
sendparams := strings.Split(dpath, "@")
parent := sendparams[0]
if len(FromName) > 0 {
if FromName[0] == '#' || FromName[0] == '@' {
FromName = dpath + FromName
if FromName[0] == '@' {
FromName = FromName[1:]
} else if strings.Contains(FromName, "/") {
from := strings.Split(FromName, "@")
if len(from) > 0 {
FromName = from[1]
}
}
cfromname = C.CString(FromName)
defer C.free(unsafe.Pointer(cfromname))
}
sendparams := strings.Split(dpath, "@")
parent := sendparams[0]
ctoname = C.CString(sendparams[1])
defer C.free(unsafe.Pointer(ctoname))
if pd, err = DatasetOpen(parent); err != nil {
@ -195,57 +205,65 @@ func (d *Dataset) SendFrom(FromName string, outf *os.File, flags SendFlags) (err
return
}
}
err = d.send(from[1], outf, &flags)
err = d.send("@"+from[1], outf, &flags)
return
}
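With Parsable, Progress and Compress now passed through to sendflags_t, an incremental send can be driven from Go without shelling out. A minimal sketch, assuming the snapshots already exist; as shown above, SendFrom itself prepends the '@' when calling send():

```go
package main

import (
	"os"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET@new") // snapshot names assumed
	if err != nil {
		panic(err)
	}
	defer d.Close()
	out, err := os.Create("/tmp/incremental.zstream")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	flags := zfs.SendFlags{
		Props:      true, // include dataset properties in the stream
		Compress:   true, // honored now that the field is wired through
		LargeBlock: true,
	}
	// Incremental stream from @old up to @new, written to the file.
	if err = d.SendFrom("TESTPOOL/DATASET@old", out, flags); err != nil {
		panic(err)
	}
}
```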
func (d *Dataset) SendSize(FromName string, flags SendFlags) (size uint64, err error) {
var porigin Property
var from Dataset
var dpath string
if dpath, err = d.Path(); err != nil {
// SendSize - estimate the size of the snapshot stream to transfer
func (d *Dataset) SendSize(FromName string, flags SendFlags) (size int64, err error) {
var r, w *os.File
errch := make(chan error)
defer func() {
select {
case <-errch:
default:
}
close(errch)
}()
flags.DryRun = true
flags.Verbose = true
flags.Progress = true
flags.Parsable = true
if r, w, err = os.Pipe(); err != nil {
return
}
zc := C.new_zfs_cmd()
defer C.free(unsafe.Pointer(zc))
dpath = strings.Split(dpath, "@")[0]
if len(FromName) > 0 {
defer r.Close()
go func() {
var tmpe error
saveOut := C.dup(C.fileno(C.stdout))
if res := C.dup2(C.int(w.Fd()), C.fileno(C.stdout)); res < 0 {
tmpe = fmt.Errorf("Redirection of zfslib stdout failed %d", res)
} else {
tmpe = d.send(FromName, w, &flags)
C.fflush(C.stdout)
C.dup2(saveOut, C.fileno(C.stdout))
}
w.Close()
errch <- tmpe
}()
if FromName[0] == '#' || FromName[0] == '@' {
FromName = dpath + FromName
}
porigin, _ = d.GetProperty(DatasetPropOrigin)
if len(porigin.Value) > 0 && porigin.Value == FromName {
FromName = ""
flags.FromOrigin = true
}
if from, err = DatasetOpen(FromName); err != nil {
r.SetReadDeadline(time.Now().Add(15 * time.Second))
var data []byte
if data, err = ioutil.ReadAll(r); err != nil {
return
}
// parse size
var sizeRe *regexp.Regexp
if sizeRe, err = regexp.Compile("size[ \t]*([0-9]+)"); err != nil {
return
}
matches := sizeRe.FindAllSubmatch(data, 3)
if len(matches) > 0 && len(matches[0]) > 1 {
if size, err = strconv.ParseInt(
string(matches[0][1]), 10, 64); err != nil {
return
}
zc.zc_fromobj = C.zfs_prop_get_int(from.list.zh, C.ZFS_PROP_OBJSETID)
from.Close()
} else {
zc.zc_fromobj = 0
}
zc.zc_obj = C.uint64_t(to_boolean_t(flags.FromOrigin))
zc.zc_sendobj = C.zfs_prop_get_int(d.list.zh, C.ZFS_PROP_OBJSETID)
zc.zc_guid = 1
zc.zc_flags = 0
if flags.LargeBlock {
zc.zc_flags |= C.LZC_SEND_FLAG_LARGE_BLOCK
}
if flags.EmbedData {
zc.zc_flags |= C.LZC_SEND_FLAG_EMBED_DATA
}
// C.estimate_ioctl(d.list.zhp, prevsnap_obj, to_boolean_t(flags.FromOrigin), lzc_send_flags, unsafe.Pointer(&size))
if ec, e := C.estimate_send_size(zc); ec != 0 {
err = fmt.Errorf("Failed to estimate send size. %s %d", e.Error(), e.(syscall.Errno))
}
size = uint64(zc.zc_objset_type)
err = <-errch
return
}
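The rewritten SendSize no longer drives ZFS_IOC_SEND directly; it forces a verbose, parsable dry-run send, temporarily redirects libzfs's stdout into a pipe, and scrapes the `size <bytes>` line that libzfs prints. From the caller's side nothing changes apart from the int64 return type; a sketch:

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	d, err := zfs.DatasetOpen("TESTPOOL/DATASET@new") // snapshot name assumed
	if err != nil {
		panic(err)
	}
	defer d.Close()
	// DryRun, Verbose, Parsable and Progress are forced inside SendSize,
	// so zero-value flags are enough here.
	size, err := d.SendSize("@old", zfs.SendFlags{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("estimated incremental stream size: %d bytes\n", size)
}
```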
// Receive - receive snapshot stream
func (d *Dataset) Receive(inf *os.File, flags RecvFlags) (err error) {
var dpath string
if dpath, err = d.Path(); err != nil {
@ -261,7 +279,7 @@ func (d *Dataset) Receive(inf *os.File, flags RecvFlags) (err error) {
defer C.free(unsafe.Pointer(cflags))
dest := C.CString(dpath)
defer C.free(unsafe.Pointer(dest))
ec := C.zfs_receive(C.libzfsHandle, dest, cflags, C.int(inf.Fd()), nil)
ec := C.zfs_receive(C.libzfsHandle, dest, nil, cflags, C.int(inf.Fd()), nil)
if ec != 0 {
err = fmt.Errorf("ZFS receive of %s failed. %s", C.GoString(dest), LastError().Error())
}
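On the receiving side only the zfs_receive call changes: the 0.7.x libzfs takes an extra argument (a properties nvlist), passed as nil here. A hedged sketch of receiving the stream produced above into an existing filesystem:

```go
package main

import (
	"os"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	d, err := zfs.DatasetOpen("BACKUPPOOL/DATASET") // destination dataset assumed
	if err != nil {
		panic(err)
	}
	defer d.Close()
	in, err := os.Open("/tmp/incremental.zstream")
	if err != nil {
		panic(err)
	}
	defer in.Close()
	// Zero-value flags; options such as force/resumable would be set here
	// (field names are not shown in this hunk).
	if err = d.Receive(in, zfs.RecvFlags{}); err != nil {
		panic(err)
	}
}
```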

zfs.c

@ -256,6 +256,10 @@ struct zfs_cmd *new_zfs_cmd(){
}
int estimate_send_size(struct zfs_cmd *zc) {
return zfs_ioctl(libzfsHandle, ZFS_IOC_SEND, zc);
int rc = zfs_ioctl(libzfsHandle, ZFS_IOC_SEND, zc);
if (rc != 0) {
rc = errno;
}
return rc;
}

zfs.go

@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"strings"
"time"
"unsafe"
)
@ -37,6 +38,12 @@ const (
DatasetTypeBookmark = (1 << 4)
)
// HoldTag - user hold tag on a snapshot (name and creation time)
type HoldTag struct {
Name string
Timestamp time.Time
}
// Dataset - ZFS dataset object
type Dataset struct {
list C.dataset_list_ptr
@ -99,6 +106,16 @@ func DatasetCloseAll(datasets []Dataset) {
// DatasetOpen opens a dataset and all of its recursive children datasets
func DatasetOpen(path string) (d Dataset, err error) {
if d, err = DatasetOpenSingle(path); err != nil {
return
}
err = d.openChildren()
return
}
// DatasetOpenSingle opens a dataset without opening all of its recursive
// children datasets
func DatasetOpenSingle(path string) (d Dataset, err error) {
csPath := C.CString(path)
d.list = C.dataset_open(csPath)
C.free(unsafe.Pointer(csPath))
@ -117,7 +134,6 @@ func DatasetOpen(path string) (d Dataset, err error) {
if err != nil {
return
}
err = d.openChildren()
return
}
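DatasetOpen keeps its old behaviour (open plus recursive children); the new DatasetOpenSingle is the cheaper variant when only one handle is needed, and it is what Hold/Release below use to reach the parent filesystem of a snapshot. A sketch:

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	// Opens only the named dataset; no recursion, so Children stays empty.
	d, err := zfs.DatasetOpenSingle("TESTPOOL/DATASET") // dataset name assumed
	if err != nil {
		panic(err)
	}
	defer d.Close()
	path, _ := d.Path()
	fmt.Println("opened:", path, "children:", len(d.Children))
}
```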
@ -166,8 +182,10 @@ func DatasetCreate(path string, dtype DatasetType,
// and cleanup dataset object/s from memory)
func (d *Dataset) Close() {
// path, _ := d.Path()
Global.Mtx.Lock()
C.dataset_list_close(d.list)
d.list = nil
Global.Mtx.Unlock()
for _, cd := range d.Children {
cd.Close()
}
@ -201,6 +219,16 @@ func (d *Dataset) Destroy(Defer bool) (err error) {
return
}
// IsSnapshot - return true if dataset is a snapshot
func (d *Dataset) IsSnapshot() (ok bool, err error) {
var path string
if path, err = d.Path(); err != nil {
return
}
ok = d.Type == DatasetTypeSnapshot || strings.Contains(path, "@")
return
}
// DestroyRecursive recursively destroy children of dataset and dataset.
func (d *Dataset) DestroyRecursive() (err error) {
var path string
@ -294,6 +322,8 @@ func (d *Dataset) GetProperty(p Prop) (prop Property, err error) {
err = errors.New(msgDatasetIsNil)
return
}
Global.Mtx.Lock()
defer Global.Mtx.Unlock()
plist := C.read_dataset_property(d.list, C.int(p))
if plist == nil {
err = LastError()
@ -306,6 +336,7 @@ func (d *Dataset) GetProperty(p Prop) (prop Property, err error) {
return
}
// GetUserProperty - lookup and return user property
func (d *Dataset) GetUserProperty(p string) (prop Property, err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
@ -345,6 +376,7 @@ func (d *Dataset) SetProperty(p Prop, value string) (err error) {
return
}
// SetUserProperty - set user defined dataset property
func (d *Dataset) SetUserProperty(prop, value string) (err error) {
if d.list == nil {
err = errors.New(msgDatasetIsNil)
@ -464,6 +496,8 @@ func (d *Dataset) IsMounted() (mounted bool, where string) {
if d.list == nil {
return
}
Global.Mtx.Lock()
defer Global.Mtx.Unlock()
mp := C.dataset_is_mounted(d.list)
// defer C.free(mp)
if mounted = (mp != nil); mounted {
@ -475,6 +509,8 @@ func (d *Dataset) IsMounted() (mounted bool, where string) {
// Mount the given filesystem.
func (d *Dataset) Mount(options string, flags int) (err error) {
Global.Mtx.Lock()
defer Global.Mtx.Unlock()
if d.list == nil {
err = errors.New(msgDatasetIsNil)
return
@ -517,6 +553,93 @@ func (d *Dataset) UnmountAll(flags int) (err error) {
return d.Unmount(flags)
}
// Hold - Adds a single reference, named with the tag argument, to the snapshot.
// Each snapshot has its own tag namespace, and tags must be unique within that space.
func (d *Dataset) Hold(flag string) (err error) {
var path string
var pd Dataset
if path, err = d.Path(); err != nil {
return
}
if !strings.Contains(path, "@") {
err = fmt.Errorf("'%s' is not a snapshot", path)
return
}
pd, err = DatasetOpenSingle(path[:strings.Index(path, "@")])
if err != nil {
return
}
defer pd.Close()
csSnapName := C.CString(path[strings.Index(path, "@")+1:])
defer C.free(unsafe.Pointer(csSnapName))
csFlag := C.CString(flag)
defer C.free(unsafe.Pointer(csFlag))
if 0 != C.zfs_hold(pd.list.zh, csSnapName, csFlag, booleanT(false), -1) {
err = LastError()
}
return
}
// Release - Removes a single reference, named with the tag argument, from the specified snapshot.
// The tag must already exist for each snapshot. If a hold exists on a snapshot, attempts to destroy
// that snapshot by using the zfs destroy command return EBUSY.
func (d *Dataset) Release(flag string) (err error) {
var path string
var pd Dataset
if path, err = d.Path(); err != nil {
return
}
if !strings.Contains(path, "@") {
err = fmt.Errorf("'%s' is not a snapshot", path)
return
}
pd, err = DatasetOpenSingle(path[:strings.Index(path, "@")])
if err != nil {
return
}
defer pd.Close()
csSnapName := C.CString(path[strings.Index(path, "@")+1:])
defer C.free(unsafe.Pointer(csSnapName))
csFlag := C.CString(flag)
defer C.free(unsafe.Pointer(csFlag))
if 0 != C.zfs_release(pd.list.zh, csSnapName, csFlag, booleanT(false)) {
err = LastError()
}
return
}
// Holds - Lists all existing user references for the given snapshot
func (d *Dataset) Holds() (tags []HoldTag, err error) {
var nvl *C.nvlist_t
var nvp *C.nvpair_t
var tu64 C.uint64_t
var path string
if path, err = d.Path(); err != nil {
return
}
if !strings.Contains(path, "@") {
err = fmt.Errorf("'%s' is not a snapshot", path)
return
}
if 0 != C.zfs_get_holds(d.list.zh, &nvl) {
err = LastError()
return
}
defer C.nvlist_free(nvl)
tags = make([]HoldTag, 0, 5)
for nvp = C.nvlist_next_nvpair(nvl, nvp); nvp != nil; {
tag := C.nvpair_name(nvp)
C.nvpair_value_uint64(nvp, &tu64)
tags = append(tags, HoldTag{
Name: C.GoString(tag),
Timestamp: time.Unix(int64(tu64), 0),
})
nvp = C.nvlist_next_nvpair(nvl, nvp)
}
return
}
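As the Release comment says, zfs destroy returns EBUSY while any hold exists. A small sketch that drains the holds reported by Holds() before destroying a snapshot; the snapshot name and import path are assumptions:

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs" // import path assumed
)

func main() {
	snap, err := zfs.DatasetOpen("TESTPOOL/DATASET@SNAPSHOT") // snapshot assumed
	if err != nil {
		panic(err)
	}
	defer snap.Close()
	tags, err := snap.Holds()
	if err != nil {
		panic(err)
	}
	// Destroy would fail with EBUSY while these remain, so release them first.
	for _, tag := range tags {
		fmt.Println("releasing hold:", tag.Name, "created", tag.Timestamp)
		if err = snap.Release(tag.Name); err != nil {
			panic(err)
		}
	}
	if err = snap.Destroy(false); err != nil {
		panic(err)
	}
}
```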
// DatasetPropertyToName converts property to name
// (returns built-in string representation of property name).
// This is optional, you can represent each property with string


@ -147,6 +147,47 @@ func zfsTestDatasetSnapshot(t *testing.T) {
print("PASS\n\n")
}
func zfsTestDatasetHoldRelease(t *testing.T) {
println("TEST Hold/Release(", TSTDatasetPathSnap, ", true, ...) ... ")
d, err := zfs.DatasetOpen(TSTDatasetPathSnap)
if err != nil {
t.Error(err)
return
}
defer d.Close()
err = d.Hold("keep")
if err != nil {
t.Error(err)
return
}
var tags []zfs.HoldTag
tags, err = d.Holds()
if err != nil {
t.Error(err)
return
}
for _, tag := range tags {
println("tag:", tag.Name, "timestamp:", tag.Timestamp.String())
}
err = d.Release("keep")
if err != nil {
t.Error(err)
return
}
tags, err = d.Holds()
if err != nil {
t.Error(err)
return
}
for _, tag := range tags {
println("* tag:", tag.Name, "timestamp:", tag.Timestamp.String())
}
print("PASS\n\n")
}
func zfsTestDatasetDestroy(t *testing.T) {
println("TEST DATASET Destroy( ", TSTDatasetPath, " ) ... ")
d, err := zfs.DatasetOpen(TSTDatasetPath)

zpool.c

@ -2,7 +2,11 @@
* using libzfs from go language, and make go code shorter and more readable.
*/
typedef unsigned long int rlim64_t;
#include <libzfs.h>
#include <libzfs/sys/zfs_context.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>
@ -189,7 +193,7 @@ property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop) {
property_list_ptr list = new_property_list();
r = zpool_get_prop(pool->zph, prop,
list->value, INT_MAX_VALUE, &source);
list->value, INT_MAX_VALUE, &source, B_FALSE);
if (r == 0) {
// strcpy(list->name, zpool_prop_to_name(prop));
zprop_source_tostr(list->source, source);
@ -492,11 +496,15 @@ nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv) {
nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan) {
importargs_t idata;
memset(&idata, 0, sizeof(importargs_t));
nvlist_ptr pools = NULL;
idata.path = path;
idata.paths = paths;
// idata.scan = 0;
return zpool_search_import(zfsh, &idata);
thread_init();
pools = zpool_search_import(zfsh, &idata);
thread_fini();
return pools;
}


@ -1,5 +1,6 @@
package zfs
// #cgo CFLAGS: -D__USE_LARGEFILE64=1
// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
@ -915,6 +916,14 @@ func PoolCreate(name string, vdev VDevTree, features map[string]string,
features["filesystem_limits"] = FENABLED
features["large_blocks"] = FENABLED
// Enable 0.7.x features by default
features["multi_vdev_crash_dump"] = FENABLED
features["large_dnode"] = FENABLED
features["sha512"] = FENABLED
features["skein"] = FENABLED
features["edonr"] = FENABLED
features["userobj_accounting"] = FENABLED
// convert properties
cprops := toCPoolProperties(props)
if cprops != nil {
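With the 0.7.x feature flags enabled by default, callers only need to list any additional features they want; the defaults in this hunk are applied inside PoolCreate regardless of what is passed. A hedged sketch of a caller-supplied features map (the remaining PoolCreate arguments are not shown in this hunk, so they are omitted from the sketch):

```go
package main

import zfs "github.com/bicomsystems/go-libzfs" // import path assumed

func main() {
	// Caller-supplied features; FENABLED is the same constant used above.
	// PoolCreate forces the 0.7.x defaults from this hunk on top of these,
	// so they do not need to be listed here.
	features := map[string]string{
		"async_destroy": zfs.FENABLED,
		"empty_bpobj":   zfs.FENABLED,
		"lz4_compress":  zfs.FENABLED,
	}
	_ = features // passed as the features argument of PoolCreate
}
```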
@ -1116,10 +1125,16 @@ func (s PoolStatus) String() string {
return "VERSION_NEWER"
case PoolStatusHostidMismatch: /* last accessed by another system */
return "HOSTID_MISMATCH"
case PoolStatusHosidActive: /* currently active on another system */
return "HOSTID_ACTIVE"
case PoolStatusHostidRequired: /* multihost=on and hostid=0 */
return "HOSTID_REQUIRED"
case PoolStatusIoFailureWait: /* failed I/O, failmode 'wait' */
return "FAILURE_WAIT"
case PoolStatusIoFailureContinue: /* failed I/O, failmode 'continue' */
return "FAILURE_CONTINUE"
case PoolStatusIOFailureMap: /* failed MMP, failmode not 'panic' */
return "HOSTID_FAILURE_MAP"
case PoolStatusBadLog: /* cannot read log chain(s) */
return "BAD_LOG"
case PoolStatusErrata: /* informational errata available */


@ -1,7 +1,6 @@
package zfs_test
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
@ -531,33 +530,33 @@ func ExamplePool_State() {
println("POOL TESTPOOL state:", zfs.PoolStateToName(pstate))
}
func TestPool_VDevTree(t *testing.T) {
type fields struct {
poolName string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
// TODO: Add test cases.
{
name: "test1",
fields: fields{"NETSTOR"},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pool, _ := zfs.PoolOpen(tt.fields.poolName)
defer pool.Close()
gotVdevs, err := pool.VDevTree()
if (err != nil) != tt.wantErr {
t.Errorf("Pool.VDevTree() error = %v, wantErr %v", err, tt.wantErr)
return
}
jsonData, _ := json.MarshalIndent(gotVdevs, "", "\t")
t.Logf("gotVdevs: %s", string(jsonData))
})
}
}
// func TestPool_VDevTree(t *testing.T) {
// type fields struct {
// poolName string
// }
// tests := []struct {
// name string
// fields fields
// wantErr bool
// }{
// // TODO: Add test cases.
// {
// name: "test1",
// fields: fields{"TESTPOOL"},
// wantErr: false,
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// pool, _ := zfs.PoolOpen(tt.fields.poolName)
// defer pool.Close()
// gotVdevs, err := pool.VDevTree()
// if (err != nil) != tt.wantErr {
// t.Errorf("Pool.VDevTree() error = %v, wantErr %v", err, tt.wantErr)
// return
// }
// jsonData, _ := json.MarshalIndent(gotVdevs, "", "\t")
// t.Logf("gotVdevs: %s", string(jsonData))
// })
// }
// }