Merge branch 'dev-zfs-0.7.x'
commit 99406fb402

README.md (14 changed lines)
@@ -1,6 +1,12 @@
 # Introduction
 
-**go-libzfs** currently implements basic manipulation of ZFS pools and data sets. Plan is to add more in further development, improve documentation with more examples, and add more tests. _go-libzfs_ use libzfs C library and does not wrap OpenZFS CLI tools. That way it ensure best performance. Per my personal opinion its more reliable way to do it, and that libzfs is less subject of possible changes then CLI tools. Goal is to let easy using and manipulating OpenZFS form with in go, and tries to map libzfs C library in to go style package respecting golang common practice.
+**go-libzfs** currently implements basic manipulation of ZFS pools and datasets. The plan is to add more functionality in further development, improve the documentation with more examples, and add more tests. _go-libzfs_ uses the libzfs C library and does not wrap the OpenZFS CLI tools. The goal is to make OpenZFS easy to use and manipulate from within Go, mapping the libzfs C library into a Go-style package that respects common golang practice.
 
+## Note
+
+This golang package is only used and tested on Linux.
+
+- Version tagged as v0.1 is the latest compatible with ZFS On Linux version 0.6.5.x
+- Version tagged as v0.2 is the latest compatible with ZFS On Linux version 0.7.x
 
 [![GoDoc](https://godoc.org/github.com/bicomsystems/go-libzfs?status.svg)](https://godoc.org/github.com/bicomsystems/go-libzfs)
@@ -12,11 +18,13 @@
 - Creating, destroying and rollback of snapshots.
 - Cloning datasets and volumes.
 - Reading and modifying dataset and volume properties.
+- Send and receive snapshot streams.
 
 ## Requirements:
 
-- OpenZFS and libzfs with development headers installed.
-- Developed using go1.4.2
+- OpenZFS on Linux and libzfs with development headers installed.
+- Developed using go1.9
 
 ## Installing
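
For a quick feel of the API, here is a minimal usage sketch (hypothetical, not part of this commit; it only uses calls that appear elsewhere in this diff — `zfs.DatasetOpen`, the `Properties` map, and `Close`):

```go
package main

import (
	"fmt"

	zfs "github.com/bicomsystems/go-libzfs"
)

func main() {
	// "tank/data" is a placeholder dataset name.
	d, err := zfs.DatasetOpen("tank/data")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer d.Close()
	// Dataset properties are cached in the Properties map.
	fmt.Println("name:", d.Properties[zfs.DatasetPropName].Value)
}
```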

common.c (23 changed lines)
@@ -54,3 +54,26 @@ nvlist_ptr new_property_nvlist() {
 int property_nvlist_add(nvlist_ptr list, const char *prop, const char *value) {
 	return nvlist_add_string(list, prop, value);
 }
+
+int redirect_libzfs_stdout(int to) {
+	int save, res;
+	save = dup(STDOUT_FILENO);
+	if (save < 0) {
+		return save;
+	}
+	res = dup2(to, STDOUT_FILENO);
+	if (res < 0) {
+		return res;
+	}
+	return save;
+}
+
+int restore_libzfs_stdout(int saved) {
+	int res;
+	fflush(stdout);
+	res = dup2(saved, STDOUT_FILENO);
+	if (res < 0) {
+		return res;
+	}
+	close(saved);
+	return res;
+}

@@ -9,7 +9,7 @@
 package zfs
 
 /*
-#cgo CFLAGS: -I /usr/include/libzfs -I /usr/include/libspl -DHAVE_IOCTL_IN_SYS_IOCTL_H
+#cgo CFLAGS: -I /usr/include/libzfs -I /usr/include/libspl -DHAVE_IOCTL_IN_SYS_IOCTL_H -D_GNU_SOURCE
 #cgo LDFLAGS: -lzfs -lzpool -lnvpair
 
 #include <stdlib.h>

common.h (6 changed lines)
@@ -2,6 +2,9 @@
  * using libzfs from go language, make go code shorter and more readable.
  */
 
+#ifndef loff_t
+#define loff_t off_t
+#endif
 #define INT_MAX_NAME 256
 #define INT_MAX_VALUE 1024
 #define ZAP_OLDMAXVALUELEN 1024
@@ -35,3 +38,6 @@ void free_properties(property_list_t *root);
 nvlist_ptr new_property_nvlist();
 int property_nvlist_add(nvlist_ptr ptr, const char* prop, const char *value);
+
+int redirect_libzfs_stdout(int to);
+int restore_libzfs_stdout(int saved);

@@ -0,0 +1,63 @@
+package zfs_test
+
+import (
+	"testing"
+
+	zfs "github.com/bicomsystems/go-libzfs"
+)
+
+func TestDataset_DestroyPromote(t *testing.T) {
+	zpoolTestPoolCreate(t)
+	// defer zpoolTestPoolDestroy(t)
+	var c1, c2 zfs.Dataset
+
+	props := make(map[zfs.Prop]zfs.Property)
+
+	d, err := zfs.DatasetCreate(TSTPoolName+"/original",
+		zfs.DatasetTypeFilesystem, make(map[zfs.Prop]zfs.Property))
+	if err != nil {
+		t.Errorf("DatasetCreate(\"%s/original\") error: %v", TSTPoolName, err)
+		return
+	}
+
+	s1, _ := zfs.DatasetSnapshot(d.Properties[zfs.DatasetPropName].Value+"@snap2", false, props)
+	s2, _ := zfs.DatasetSnapshot(d.Properties[zfs.DatasetPropName].Value+"@snap1", false, props)
+
+	c1, err = s1.Clone(TSTPoolName+"/clone1", nil)
+	if err != nil {
+		t.Errorf("s1.Clone(\"%s/clone1\", nil) error: %v", TSTPoolName, err)
+		d.Close()
+		return
+	}
+
+	zfs.DatasetSnapshot(c1.Properties[zfs.DatasetPropName].Value+"@snap1", false, props)
+
+	c2, err = s2.Clone(TSTPoolName+"/clone2", nil)
+	if err != nil {
+		t.Errorf("s2.Clone(\"%s/clone2\", nil) error: %v", TSTPoolName, err)
+		d.Close()
+		c1.Close()
+		return
+	}
+	s2.Close()
+
+	zfs.DatasetSnapshot(c2.Properties[zfs.DatasetPropName].Value+"@snap0", false, props)
+	c1.Close()
+	c2.Close()
+
+	// reopen pool
+	d.Close()
+	if d, err = zfs.DatasetOpen(TSTPoolName + "/original"); err != nil {
+		t.Error("zfs.DatasetOpen")
+		return
+	}
+
+	if err = d.DestroyPromote(); err != nil {
+		t.Errorf("DestroyPromote error: %v", err)
+		d.Close()
+		return
+	}
+	t.Log("Destroy promote completed with success")
+	d.Close()
+	zpoolTestPoolDestroy(t)
+}

@@ -230,13 +230,12 @@ func (d *Dataset) SendSize(FromName string, flags SendFlags) (size int64, err er
 	defer r.Close()
 	go func() {
 		var tmpe error
-		saveOut := C.dup(C.fileno(C.stdout))
-		if res := C.dup2(C.int(w.Fd()), C.fileno(C.stdout)); res < 0 {
-			tmpe = fmt.Errorf("redirection of libzfs stdout failed %d", res)
+		saveOut := C.redirect_libzfs_stdout(C.int(w.Fd()))
+		if saveOut < 0 {
+			tmpe = fmt.Errorf("redirection of libzfs stdout failed %d", saveOut)
 		} else {
 			tmpe = d.send(FromName, w, &flags)
-			C.fflush(C.stdout)
-			C.dup2(saveOut, C.fileno(C.stdout))
+			C.restore_libzfs_stdout(saveOut)
 		}
 		w.Close()
 		errch <- tmpe

@@ -0,0 +1,50 @@
+package zfs
+
+import (
+	"strconv"
+)
+
+type clonesCreateDesc []Dataset
+
+func (list clonesCreateDesc) Less(i, j int) bool {
+	_, oki := list[i].Properties[DatasetNumProps+1000]
+	_, okj := list[j].Properties[DatasetNumProps+1000]
+	if oki && okj {
+		unixti, err := strconv.ParseInt(
+			list[i].Properties[DatasetNumProps+1000].Value, 10, 64)
+		if err != nil {
+			panic(err)
+		}
+		unixtj, err := strconv.ParseInt(
+			list[j].Properties[DatasetNumProps+1000].Value, 10, 64)
+		if err != nil {
+			panic(err)
+		}
+		if unixti != unixtj {
+			return unixti > unixtj
+		}
+	}
+
+	// if we have two datasets created from the same snapshot
+	// any of them will do, but we will go for the most recent
+	unixti, err := strconv.ParseInt(
+		list[i].Properties[DatasetPropCreateTXG].Value, 10, 64)
+	if err != nil {
+		panic(err)
+	}
+	unixtj, err := strconv.ParseInt(
+		list[j].Properties[DatasetPropCreateTXG].Value, 10, 64)
+	if err != nil {
+		panic(err)
+	}
+
+	return unixti > unixtj
+}
+
+func (list clonesCreateDesc) Swap(i, j int) {
+	list[i], list[j] = list[j], list[i]
+}
+
+func (list clonesCreateDesc) Len() int {
+	return len(list)
+}
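
For context, a hedged sketch of how this comparator is meant to be driven (it mirrors what `Clones()` in zfs.go below does; `sortClonesByOrigin` is a hypothetical helper name, and `DatasetNumProps+1000` is the same out-of-band property slot the comparator reads):

```go
package zfs

import "sort"

// sortClonesByOrigin is a hypothetical illustration: each clone is expected
// to carry its origin snapshot's createtxg under the out-of-band
// DatasetNumProps+1000 key before sorting.
func sortClonesByOrigin(clones []Dataset) {
	sort.Sort(clonesCreateDesc(clones))
	// clones[0] now has the most recent origin snapshot; ties fall back to
	// the clone's own createtxg, newest first.
}
```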

zfs.go (185 changed lines)
@@ -10,6 +10,8 @@ import "C"
 import (
 	"errors"
 	"fmt"
+	"path"
+	"sort"
 	"strings"
 	"time"
 	"unsafe"

@@ -191,6 +193,13 @@ func (d *Dataset) Close() {
 	}
 }
 
+// reOpen - close and reopen the dataset. Not thread safe!
+func (d *Dataset) reOpen() (err error) {
+	d.Close()
+	*d, err = DatasetOpen(d.Properties[DatasetPropName].Value)
+	return
+}
+
 // Destroy destroys the dataset. The caller must make sure that the filesystem
 // isn't mounted, and that there are no active dependents. Set Defer argument
 // to true to defer destruction for when dataset is not in use. Call Close() to
@@ -220,12 +229,9 @@ func (d *Dataset) Destroy(Defer bool) (err error) {
 }
 
 // IsSnapshot - return true if dataset is a snapshot
-func (d *Dataset) IsSnapshot() (ok bool, err error) {
-	var path string
-	if path, err = d.Path(); err != nil {
-		return
-	}
-	ok = d.Type == DatasetTypeSnapshot || strings.Contains(path, "@")
+func (d *Dataset) IsSnapshot() (ok bool) {
+	path := d.Properties[DatasetPropName].Value
+	ok = (d.Type == DatasetTypeSnapshot || strings.Contains(path, "@"))
 	return
 }
@@ -294,6 +300,16 @@ func (d *Dataset) Pool() (p Pool, err error) {
 	return
 }
 
+// PoolName - return the name of the pool
+func (d *Dataset) PoolName() string {
+	path := d.Properties[DatasetPropName].Value
+	i := strings.Index(path, "/")
+	if i < 0 {
+		return path
+	}
+	return path[0:i]
+}
+
 // ReloadProperties re-read dataset's properties
 func (d *Dataset) ReloadProperties() (err error) {
 	if d.list == nil {
@@ -652,3 +668,160 @@ func DatasetPropertyToName(p Prop) (name string) {
 	name = C.GoString(C.zfs_prop_to_name(prop))
 	return
 }
+
+// DestroyPromote - Same as DestroyRecursive() except it will not destroy
+// any dependent clones, but promote them first.
+// This function will navigate any dependency chain
+// of cloned datasets using breadth first search, promoting clones as needed,
+// to let you remove the dataset regardless of its cloned dependencies.
+// Note: this function won't work when you want to destroy a snapshot this way.
+// However, it will destroy all snapshots of the destroyed dataset that have no
+// dependencies; other snapshots will move to the promoted clone.
+func (d *Dataset) DestroyPromote() (err error) {
+	var snaps []Dataset
+	var clones []string
+	// We need to save the list of child snapshots, to destroy them later,
+	// since they will be moved to the promoted clone
+	var psnaps []string
+	if clones, err = d.Clones(); err != nil {
+		return
+	}
+	if len(clones) > 0 {
+		var cds Dataset
+		// For this to always work we need to promote the youngest clone,
+		// in terms of most recent origin snapshot, or creation time if
+		// cloned from the same snapshot
+		if cds, err = DatasetOpen(clones[0]); err != nil {
+			return
+		}
+		defer cds.Close()
+		// since promote will move the snapshots to the promoted dataset
+		// we need to check and resolve possible name conflicts
+		if snaps, err = d.Snapshots(); err != nil {
+			return
+		}
+		for _, s := range snaps {
+			spath := s.Properties[DatasetPropName].Value
+			sname := spath[strings.Index(spath, "@"):]
+			// detect and resolve name conflicts
+			if ok, _ := cds.FindSnapshotName(sname); ok {
+				// snapshot with the same name already exists
+				volname := path.Base(spath[:strings.Index(spath, "@")])
+				sname = sname + "." + volname
+				if err = s.Rename(spath+"."+volname, false, true); err != nil {
+					return
+				}
+			}
+			psnaps = append(psnaps, sname)
+		}
+		if err = cds.Promote(); err != nil {
+			return
+		}
+	}
+	// destroy child datasets, since this works recursively
+	for _, cd := range d.Children {
+		if err = cd.DestroyPromote(); err != nil {
+			return
+		}
+	}
+	d.Children = make([]Dataset, 0)
+	if err = d.Destroy(false); err != nil {
+		return
+	}
+	// Load with new promoted snapshots
+	if len(clones) > 0 && len(psnaps) > 0 {
+		var cds Dataset
+		if cds, err = DatasetOpen(clones[0]); err != nil {
+			return
+		}
+		defer cds.Close()
+		// try to destroy (promoted) snapshots now
+		for _, sname := range psnaps {
+			if ok, snap := cds.FindSnapshotName(sname); ok {
+				snap.Destroy(false)
+			}
+		}
+	}
+	return
+}
+
+// Snapshots - filter and return all snapshots of the dataset
+func (d *Dataset) Snapshots() (snaps []Dataset, err error) {
+	for _, ch := range d.Children {
+		if !ch.IsSnapshot() {
+			continue
+		}
+		snaps = append(snaps, ch)
+	}
+	return
+}
+
+// FindSnapshot - returns true if the given path is one of the dataset's snapshots
+func (d *Dataset) FindSnapshot(path string) (ok bool, snap Dataset) {
+	for _, ch := range d.Children {
+		if !ch.IsSnapshot() {
+			continue
+		}
+		if ok = (path == ch.Properties[DatasetPropName].Value); ok {
+			snap = ch
+			break
+		}
+	}
+	return
+}
+
+// FindSnapshotName - returns true and the snapshot if the given snapshot
+// name, e.g. '@snap1', is one of the dataset's snapshots
+func (d *Dataset) FindSnapshotName(name string) (ok bool, snap Dataset) {
+	return d.FindSnapshot(d.Properties[DatasetPropName].Value + name)
+}
+
+// Clones - get a list of all dataset paths cloned from this
+// dataset or this snapshot.
+// The list is sorted descending by origin snapshot order
+func (d *Dataset) Clones() (clones []string, err error) {
+	// Clones can only live on the same pool
+	var root Dataset
+	var sortDesc []Dataset
+	if root, err = DatasetOpen(d.PoolName()); err != nil {
+		return
+	}
+	defer root.Close()
+	dIsSnapshot := d.IsSnapshot()
+	// Use breadth first search to find all clones
+	queue := make(chan Dataset, 1024)
+	defer close(queue) // This will close and clean up all
+	queue <- root      // start from the root element
+	for {
+		select {
+		case ds := <-queue: // pull from the queue (breadth first search)
+			for _, ch := range ds.Children {
+				origin := ch.Properties[DatasetPropOrigin].Value
+				if len(origin) > 0 {
+					if dIsSnapshot && origin == d.Properties[DatasetPropName].Value {
+						// if this dataset is a snapshot
+						ch.Properties[DatasetNumProps+1000] = d.Properties[DatasetPropCreateTXG]
+						sortDesc = append(sortDesc, ch)
+					} else {
+						// Check if the origin of this dataset is one of our snapshots
+						ok, snap := d.FindSnapshot(origin)
+						if !ok {
+							continue
+						}
+						ch.Properties[DatasetNumProps+1000] = snap.Properties[DatasetPropCreateTXG]
+						sortDesc = append(sortDesc, ch)
+					}
+				}
+				queue <- ch
+			}
+		default:
+			sort.Sort(clonesCreateDesc(sortDesc))
+			// This way we get clones ordered from most recent snapshots first
+			for _, c := range sortDesc {
+				clones = append(clones, c.Properties[DatasetPropName].Value)
+			}
+			return
+		}
+	}
+}
zpool.h (2 changed lines)
@@ -79,7 +79,7 @@ nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv);
 
 nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan);
 
-__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags);
+uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags);
 int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force);
 int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy);

@@ -8,7 +8,7 @@
 #include "zpool.h"
 
 
-__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags) {
+uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags) {
 	vdev_state_t newstate = VDEV_STATE_UNKNOWN;
 	zpool_vdev_online(pool->zph, path, flags, &newstate);
 	return newstate;