2015-04-19 23:25:52 +02:00
|
|
|
package zfs
|
|
|
|
|
|
|
|
// #include <stdlib.h>
|
|
|
|
// #include <libzfs.h>
|
2017-06-02 08:42:14 +02:00
|
|
|
// #include "common.h"
|
2015-04-19 23:25:52 +02:00
|
|
|
// #include "zpool.h"
|
|
|
|
// #include "zfs.h"
|
|
|
|
import "C"
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2017-02-23 11:29:17 +01:00
|
|
|
"fmt"
|
2018-12-17 14:01:59 +01:00
|
|
|
"path"
|
|
|
|
"sort"
|
2017-08-04 13:12:41 +02:00
|
|
|
"strings"
|
2018-11-02 11:15:55 +01:00
|
|
|
"time"
|
2016-07-26 21:14:28 +02:00
|
|
|
"unsafe"
|
2015-04-19 23:25:52 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// msgDatasetIsNil is returned by Dataset methods when the underlying
	// libzfs handle is nil (never opened, or already closed).
	msgDatasetIsNil = "Dataset handle not initialized or its closed"
)
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetProperties type is map of dataset or volume properties prop -> value.
type DatasetProperties map[Prop]string
|
|
|
|
|
|
|
|
// DatasetType defines enum of dataset types (bit flags; see the
// DatasetType* constants below).
type DatasetType int32
|
|
|
|
|
|
|
|
const (
	// Bit-flag dataset types. These are cast to C.zfs_type_t when passed
	// to libzfs (see DatasetCreate), so the values must stay in sync with
	// the libzfs zfs_type_t enumeration — do not reorder.

	// DatasetTypeFilesystem - file system dataset
	DatasetTypeFilesystem DatasetType = (1 << 0)
	// DatasetTypeSnapshot - snapshot of dataset
	DatasetTypeSnapshot = (1 << 1)
	// DatasetTypeVolume - volume (virtual block device) dataset
	DatasetTypeVolume = (1 << 2)
	// DatasetTypePool - pool dataset
	DatasetTypePool = (1 << 3)
	// DatasetTypeBookmark - bookmark dataset
	DatasetTypeBookmark = (1 << 4)
)
|
|
|
|
|
2018-11-02 11:15:55 +01:00
|
|
|
// HoldTag describes a single user hold (reference) placed on a snapshot,
// as returned by Dataset.Holds.
type HoldTag struct {
	// Name is the hold tag name.
	Name string
	// Timestamp is when the hold was created.
	Timestamp time.Time
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Dataset - ZFS dataset object. Obtain instances through DatasetOpen,
// DatasetOpenSingle or DatasetOpenAll, and release them with Close.
type Dataset struct {
	// list is the underlying libzfs dataset-list node handle (cgo).
	list C.dataset_list_ptr
	// Type of this dataset (filesystem, snapshot, volume, ...).
	Type DatasetType
	// Properties is a cached prop -> Property map, filled by
	// ReloadProperties / GetProperty.
	Properties map[Prop]Property
	// Children holds recursively opened child datasets (see openChildren).
	Children []Dataset
}
|
|
|
|
|
|
|
|
func (d *Dataset) openChildren() (err error) {
|
|
|
|
d.Children = make([]Dataset, 0, 5)
|
2017-06-02 08:42:14 +02:00
|
|
|
list := C.dataset_list_children(d.list)
|
2017-02-23 11:29:17 +01:00
|
|
|
for list != nil {
|
|
|
|
dataset := Dataset{list: list}
|
2017-06-02 08:42:14 +02:00
|
|
|
dataset.Type = DatasetType(C.dataset_type(d.list))
|
2015-12-04 23:05:19 +01:00
|
|
|
dataset.Properties = make(map[Prop]Property)
|
2015-04-19 23:25:52 +02:00
|
|
|
err = dataset.ReloadProperties()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
d.Children = append(d.Children, dataset)
|
2017-02-23 11:29:17 +01:00
|
|
|
list = C.dataset_next(list)
|
2015-04-19 23:25:52 +02:00
|
|
|
}
|
2015-12-04 23:05:19 +01:00
|
|
|
for ci := range d.Children {
|
2015-04-19 23:25:52 +02:00
|
|
|
if err = d.Children[ci].openChildren(); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetOpenAll recursive get handles to all available datasets on system
// (file-systems, volumes or snapshots).
// Release the result with DatasetCloseAll.
func DatasetOpenAll() (datasets []Dataset, err error) {
	var dataset Dataset
	// Walk the linked list of root (top-level) datasets.
	dataset.list = C.dataset_list_root()
	for dataset.list != nil {
		dataset.Type = DatasetType(C.dataset_type(dataset.list))
		err = dataset.ReloadProperties()
		if err != nil {
			return
		}
		// append copies the Dataset value; each element keeps its own
		// list pointer even though the loop variable is reused.
		datasets = append(datasets, dataset)
		dataset.list = C.dataset_next(dataset.list)
	}
	// Recursively open children for each collected root dataset.
	for ci := range datasets {
		if err = datasets[ci].openChildren(); err != nil {
			return
		}
	}
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetCloseAll close all datasets in slice and all of its recursive
|
|
|
|
// children datasets
|
2015-04-19 23:25:52 +02:00
|
|
|
func DatasetCloseAll(datasets []Dataset) {
|
|
|
|
for _, d := range datasets {
|
|
|
|
d.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetOpen open dataset and all of its recursive children datasets
|
2015-04-19 23:25:52 +02:00
|
|
|
func DatasetOpen(path string) (d Dataset, err error) {
|
2018-11-02 11:15:55 +01:00
|
|
|
if d, err = DatasetOpenSingle(path); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
err = d.openChildren()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// DatasetOpenSingle open dataset without opening all of its recursive
|
|
|
|
// children datasets
|
|
|
|
func DatasetOpenSingle(path string) (d Dataset, err error) {
|
2016-07-26 21:14:28 +02:00
|
|
|
csPath := C.CString(path)
|
2017-06-02 08:42:14 +02:00
|
|
|
d.list = C.dataset_open(csPath)
|
2016-07-26 21:14:28 +02:00
|
|
|
C.free(unsafe.Pointer(csPath))
|
2015-04-19 23:25:52 +02:00
|
|
|
|
2017-06-02 08:42:14 +02:00
|
|
|
if d.list == nil || d.list.zh == nil {
|
2015-04-19 23:25:52 +02:00
|
|
|
err = LastError()
|
2017-02-23 11:29:17 +01:00
|
|
|
if err == nil {
|
|
|
|
err = fmt.Errorf("dataset not found.")
|
|
|
|
}
|
2017-06-15 14:12:39 +02:00
|
|
|
err = fmt.Errorf("%s - %s", err.Error(), path)
|
2015-04-19 23:25:52 +02:00
|
|
|
return
|
|
|
|
}
|
2017-06-02 08:42:14 +02:00
|
|
|
d.Type = DatasetType(C.dataset_type(d.list))
|
2015-12-04 23:05:19 +01:00
|
|
|
d.Properties = make(map[Prop]Property)
|
2015-04-19 23:25:52 +02:00
|
|
|
err = d.ReloadProperties()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
func datasetPropertiesTonvlist(props map[Prop]Property) (
|
2017-02-23 11:29:17 +01:00
|
|
|
cprops C.nvlist_ptr, err error) {
|
2015-04-19 23:25:52 +02:00
|
|
|
// convert properties to nvlist C type
|
2017-06-02 08:42:14 +02:00
|
|
|
cprops = C.new_property_nvlist()
|
|
|
|
if cprops == nil {
|
2015-04-19 23:25:52 +02:00
|
|
|
err = errors.New("Failed to allocate properties")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for prop, value := range props {
|
2016-07-26 21:14:28 +02:00
|
|
|
csValue := C.CString(value.Value)
|
2017-06-02 08:42:14 +02:00
|
|
|
r := C.property_nvlist_add(
|
|
|
|
cprops, C.zfs_prop_to_name(C.zfs_prop_t(prop)), csValue)
|
2016-07-26 21:14:28 +02:00
|
|
|
C.free(unsafe.Pointer(csValue))
|
2015-04-19 23:25:52 +02:00
|
|
|
if r != 0 {
|
|
|
|
err = errors.New("Failed to convert property")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetCreate create a new filesystem or volume on path representing
// pool/dataset or pool/parent/dataset.
// On success the newly created dataset is opened (with children) and
// returned; release it with Close.
func DatasetCreate(path string, dtype DatasetType,
	props map[Prop]Property) (d Dataset, err error) {
	var cprops C.nvlist_ptr
	if cprops, err = datasetPropertiesTonvlist(props); err != nil {
		return
	}
	defer C.nvlist_free(cprops)

	csPath := C.CString(path)
	errcode := C.dataset_create(csPath, C.zfs_type_t(dtype), cprops)
	C.free(unsafe.Pointer(csPath))
	if errcode != 0 {
		err = LastError()
		return
	}
	// Open the freshly created dataset for the caller.
	return DatasetOpen(path)
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Close close dataset and all its recursive children datasets (close handle
// and cleanup dataset object/s from memory).
func (d *Dataset) Close() {
	// path, _ := d.Path()
	// Serialize handle teardown; libzfs handle operations in this package
	// are guarded by the global mutex.
	Global.Mtx.Lock()
	C.dataset_list_close(d.list)
	d.list = nil
	Global.Mtx.Unlock()
	// Children hold their own handles; close them recursively.
	for _, cd := range d.Children {
		cd.Close()
	}
}
|
|
|
|
|
2018-12-17 14:01:59 +01:00
|
|
|
// reOpen - close and open dataset. Not thread safe!
func (d *Dataset) reOpen() (err error) {
	d.Close()
	// Reopen using the dataset name cached in the Properties map
	// (Close does not clear Properties).
	*d, err = DatasetOpen(d.Properties[DatasetPropName].Value)
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Destroy destroys the dataset. The caller must make sure that the filesystem
|
2015-05-07 22:59:44 +02:00
|
|
|
// isn't mounted, and that there are no active dependents. Set Defer argument
|
2017-06-02 08:42:14 +02:00
|
|
|
// to true to defer destruction for when dataset is not in use. Call Close() to
|
|
|
|
// cleanup memory.
|
2015-04-19 23:25:52 +02:00
|
|
|
func (d *Dataset) Destroy(Defer bool) (err error) {
|
2015-05-07 22:59:44 +02:00
|
|
|
if len(d.Children) > 0 {
|
|
|
|
path, e := d.Path()
|
|
|
|
if e != nil {
|
|
|
|
return
|
|
|
|
}
|
2015-12-04 23:05:19 +01:00
|
|
|
dsType, e := d.GetProperty(DatasetPropType)
|
2016-07-26 21:14:28 +02:00
|
|
|
if e != nil {
|
|
|
|
dsType.Value = err.Error() // just put error (why it didn't fetch property type)
|
|
|
|
}
|
2015-05-07 22:59:44 +02:00
|
|
|
err = errors.New("Cannot destroy dataset " + path +
|
|
|
|
": " + dsType.Value + " has children")
|
|
|
|
return
|
|
|
|
}
|
2015-04-19 23:25:52 +02:00
|
|
|
if d.list != nil {
|
2017-06-02 08:42:14 +02:00
|
|
|
if ec := C.dataset_destroy(d.list, booleanT(Defer)); ec != 0 {
|
2015-04-19 23:25:52 +02:00
|
|
|
err = LastError()
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-11-02 11:15:55 +01:00
|
|
|
// IsSnapshot - retrun true if datset is snapshot
|
2018-12-17 14:01:59 +01:00
|
|
|
func (d *Dataset) IsSnapshot() (ok bool) {
|
|
|
|
path := d.Properties[DatasetPropName].Value
|
|
|
|
ok = (d.Type == DatasetTypeSnapshot || strings.Contains(path, "@"))
|
2018-11-02 11:15:55 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DestroyRecursive recursively destroy children of dataset and dataset.
// For a snapshot ("name@snap"), it also destroys same-named snapshots on
// every descendant of the parent filesystem before destroying the snapshot
// itself.
func (d *Dataset) DestroyRecursive() (err error) {
	var path string
	if path, err = d.Path(); err != nil {
		return
	}
	if !strings.Contains(path, "@") { // not snapshot
		if len(d.Children) > 0 {
			for _, c := range d.Children {
				if err = c.DestroyRecursive(); err != nil {
					return
				}
				// close handle to destroyed child dataset
				c.Close()
			}
			// clear closed children array
			d.Children = make([]Dataset, 0)
		}
		err = d.Destroy(false)
	} else {
		// Snapshot case: split "parent@snap" and walk the parent's
		// children destroying each one's same-named snapshot.
		var parent Dataset
		tmp := strings.Split(path, "@")
		ppath, snapname := tmp[0], tmp[1]
		if parent, err = DatasetOpen(ppath); err != nil {
			return
		}
		defer parent.Close()
		if len(parent.Children) > 0 {
			for _, c := range parent.Children {
				if path, err = c.Path(); err != nil {
					return
				}
				if strings.Contains(path, "@") {
					continue // skip other snapshots
				}
				// Open the child's snapshot of the same name; a failure
				// means it does not exist, so skip the child.
				if c, err = DatasetOpen(path + "@" + snapname); err != nil {
					continue
				}
				if err = c.DestroyRecursive(); err != nil {
					c.Close()
					return
				}
				c.Close()
			}
		}
		// Finally destroy the snapshot itself (this also resets any
		// leftover err from a skipped child above).
		err = d.Destroy(false)
	}
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Pool returns pool dataset belongs to. Release the returned Pool with its
// Close method.
func (d *Dataset) Pool() (p Pool, err error) {
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	p.list = C.dataset_get_pool(d.list)
	if p.list != nil && p.list.zph != nil {
		err = p.ReloadProperties()
		return
	}
	// No usable pool handle; report the libzfs error.
	err = LastError()
	return
}
|
|
|
|
|
2018-12-17 14:01:59 +01:00
|
|
|
// PoolName - return name of the pool
|
|
|
|
func (d *Dataset) PoolName() string {
|
|
|
|
path := d.Properties[DatasetPropName].Value
|
|
|
|
i := strings.Index(path, "/")
|
|
|
|
if i < 0 {
|
|
|
|
return path
|
|
|
|
}
|
|
|
|
return path[0:i]
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// ReloadProperties re-read dataset's properties into the Properties map.
// Properties not available for this dataset's type are skipped.
func (d *Dataset) ReloadProperties() (err error) {
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	d.Properties = make(map[Prop]Property)
	// libzfs property access here is guarded by the package global mutex.
	Global.Mtx.Lock()
	defer Global.Mtx.Unlock()
	C.zfs_refresh_properties(d.list.zh)
	for prop := DatasetPropType; prop < DatasetNumProps; prop++ {
		plist := C.read_dataset_property(d.list, C.int(prop))
		if plist == nil {
			// Property not readable for this dataset; skip it.
			continue
		}
		d.Properties[prop] = Property{Value: C.GoString(&(*plist).value[0]),
			Source: C.GoString(&(*plist).source[0])}
		C.free_properties(plist)
	}
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// GetProperty reload and return single specified property. This also reloads requested
// property in Properties map.
func (d *Dataset) GetProperty(p Prop) (prop Property, err error) {
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	Global.Mtx.Lock()
	defer Global.Mtx.Unlock()
	plist := C.read_dataset_property(d.list, C.int(p))
	if plist == nil {
		err = LastError()
		return
	}
	// plist is C-allocated; free it once the strings are copied out.
	defer C.free_properties(plist)
	prop = Property{Value: C.GoString(&(*plist).value[0]),
		Source: C.GoString(&(*plist).source[0])}
	// Refresh the cached entry as documented.
	d.Properties[p] = prop
	return
}
|
|
|
|
|
2018-11-02 11:15:55 +01:00
|
|
|
// GetUserProperty - lookup and return user property (custom "name=value"
// property) by name. Unlike GetProperty, the result is not cached in the
// Properties map.
func (d *Dataset) GetUserProperty(p string) (prop Property, err error) {
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	csp := C.CString(p)
	defer C.free(unsafe.Pointer(csp))
	plist := C.read_user_property(d.list, csp)
	if plist == nil {
		err = LastError()
		return
	}
	// plist is C-allocated; free it once the strings are copied out.
	defer C.free_properties(plist)
	prop = Property{Value: C.GoString(&(*plist).value[0]),
		Source: C.GoString(&(*plist).source[0])}
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// SetProperty set ZFS dataset property to value. Not all properties can be set,
|
2015-04-19 23:25:52 +02:00
|
|
|
// some can be set only at creation time and some are read only.
|
|
|
|
// Always check if returned error and its description.
|
2015-12-04 23:05:19 +01:00
|
|
|
func (d *Dataset) SetProperty(p Prop, value string) (err error) {
|
2015-04-19 23:25:52 +02:00
|
|
|
if d.list == nil {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
return
|
|
|
|
}
|
2016-07-26 21:14:28 +02:00
|
|
|
csValue := C.CString(value)
|
2017-06-02 08:42:14 +02:00
|
|
|
errcode := C.dataset_prop_set(d.list, C.zfs_prop_t(p), csValue)
|
2016-07-26 21:14:28 +02:00
|
|
|
C.free(unsafe.Pointer(csValue))
|
2015-04-19 23:25:52 +02:00
|
|
|
if errcode != 0 {
|
|
|
|
err = LastError()
|
|
|
|
}
|
2015-06-09 14:26:35 +02:00
|
|
|
// Update Properties member with change made
|
|
|
|
if _, err = d.GetProperty(p); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2015-04-19 23:25:52 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-11-02 11:15:55 +01:00
|
|
|
// SetUserProperty -
|
2017-01-16 14:07:17 +01:00
|
|
|
func (d *Dataset) SetUserProperty(prop, value string) (err error) {
|
|
|
|
if d.list == nil {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
csValue := C.CString(value)
|
|
|
|
csProp := C.CString(prop)
|
2017-06-02 08:42:14 +02:00
|
|
|
errcode := C.dataset_user_prop_set(d.list, csProp, csValue)
|
2017-01-16 14:07:17 +01:00
|
|
|
C.free(unsafe.Pointer(csValue))
|
|
|
|
C.free(unsafe.Pointer(csProp))
|
|
|
|
if errcode != 0 {
|
|
|
|
err = LastError()
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Clone - clones the dataset. The target must be of the same type as
// the source. On success the clone is opened and returned; release it with
// Close.
func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err error) {
	var cprops C.nvlist_ptr
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	if cprops, err = datasetPropertiesTonvlist(props); err != nil {
		return
	}
	defer C.nvlist_free(cprops)
	csTarget := C.CString(target)
	defer C.free(unsafe.Pointer(csTarget))
	if errc := C.dataset_clone(d.list, csTarget, cprops); errc != 0 {
		err = LastError()
		return
	}
	// Open the newly created clone for the caller.
	rd, err = DatasetOpen(target)
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetSnapshot create dataset snapshot. Set recur to true to snapshot child datasets.
// On success the snapshot is opened and returned; release it with Close.
func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Dataset, err error) {
	var cprops C.nvlist_ptr
	if cprops, err = datasetPropertiesTonvlist(props); err != nil {
		return
	}
	defer C.nvlist_free(cprops)
	csPath := C.CString(path)
	defer C.free(unsafe.Pointer(csPath))
	if errc := C.dataset_snapshot(csPath, booleanT(recur), cprops); errc != 0 {
		err = LastError()
		return
	}
	// Open the newly created snapshot for the caller.
	rd, err = DatasetOpen(path)
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Path return zfs dataset path/name
|
2015-04-19 23:25:52 +02:00
|
|
|
func (d *Dataset) Path() (path string, err error) {
|
|
|
|
if d.list == nil {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
return
|
|
|
|
}
|
2017-06-02 08:42:14 +02:00
|
|
|
name := C.dataset_get_name(d.list)
|
2015-04-19 23:25:52 +02:00
|
|
|
path = C.GoString(name)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// Rollback rolls the dataset back to the given snapshot. The force flag is
// passed through to libzfs (presumably forcing unmount as with
// 'zfs rollback -f' — confirm against the C wrapper).
func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) {
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	if errc := C.dataset_rollback(d.list, snap.list, booleanT(force)); errc != 0 {
		err = LastError()
		return
	}
	// Refresh cached properties; any reload error is deliberately ignored.
	d.ReloadProperties()
	return
}
|
|
|
|
|
2017-03-17 14:22:17 +01:00
|
|
|
// Promote promotes dataset clone
|
|
|
|
func (d *Dataset) Promote() (err error) {
|
|
|
|
if d.list == nil {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
return
|
|
|
|
}
|
2017-06-02 08:42:14 +02:00
|
|
|
if errc := C.dataset_promote(d.list); errc != 0 {
|
2017-03-17 14:22:17 +01:00
|
|
|
err = LastError()
|
2017-06-02 08:42:14 +02:00
|
|
|
return
|
2017-03-17 14:22:17 +01:00
|
|
|
}
|
2017-06-02 08:42:14 +02:00
|
|
|
d.ReloadProperties()
|
2017-03-17 14:22:17 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-05-07 22:59:44 +02:00
|
|
|
// Rename dataset. Set recur to rename child datasets as well; set
// forceUnmount to force unmounting if required.
func (d *Dataset) Rename(newName string, recur,
	forceUnmount bool) (err error) {
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	csNewName := C.CString(newName)
	defer C.free(unsafe.Pointer(csNewName))
	if errc := C.dataset_rename(d.list, csNewName,
		booleanT(recur), booleanT(forceUnmount)); errc != 0 {
		err = LastError()
		return
	}
	// Refresh cached properties (e.g. name); reload errors are ignored.
	d.ReloadProperties()
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// IsMounted checks to see if the mount is active. If the filesystem is mounted,
// sets in 'where' argument the current mountpoint, and returns true. Otherwise,
// returns false.
func (d *Dataset) IsMounted() (mounted bool, where string) {
	if d.list == nil {
		return
	}
	Global.Mtx.Lock()
	defer Global.Mtx.Unlock()
	mp := C.dataset_is_mounted(d.list)
	// defer C.free(mp)
	// mp is a C string owned by this call; copy it out then free.
	if mounted = (mp != nil); mounted {
		where = C.GoString(mp)
		C.free(unsafe.Pointer(mp))
	}
	return
}
|
|
|
|
|
|
|
|
// Mount the given filesystem. options is the comma-separated mount options
// string; flags is passed through to libzfs.
func (d *Dataset) Mount(options string, flags int) (err error) {
	// Guard the libzfs mount call with the package global mutex.
	Global.Mtx.Lock()
	defer Global.Mtx.Unlock()
	if d.list == nil {
		err = errors.New(msgDatasetIsNil)
		return
	}
	csOptions := C.CString(options)
	defer C.free(unsafe.Pointer(csOptions))
	if ec := C.dataset_mount(d.list, csOptions, C.int(flags)); ec != 0 {
		err = LastError()
	}
	return
}
|
|
|
|
|
|
|
|
// Unmount the given filesystem.
|
|
|
|
func (d *Dataset) Unmount(flags int) (err error) {
|
|
|
|
if d.list == nil {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
return
|
|
|
|
}
|
2017-06-02 08:42:14 +02:00
|
|
|
if ec := C.dataset_unmount(d.list, C.int(flags)); ec != 0 {
|
2015-04-19 23:25:52 +02:00
|
|
|
err = LastError()
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// UnmountAll unmount this filesystem and any children inheriting the
|
|
|
|
// mountpoint property.
|
2015-04-19 23:25:52 +02:00
|
|
|
func (d *Dataset) UnmountAll(flags int) (err error) {
|
|
|
|
if d.list == nil {
|
|
|
|
err = errors.New(msgDatasetIsNil)
|
|
|
|
return
|
|
|
|
}
|
2017-08-04 13:12:41 +02:00
|
|
|
// This is implemented recursive because zfs_unmountall() didn't work
|
|
|
|
if len(d.Children) > 0 {
|
|
|
|
for _, c := range d.Children {
|
|
|
|
if err = c.UnmountAll(flags); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2015-04-19 23:25:52 +02:00
|
|
|
}
|
2017-08-04 13:12:41 +02:00
|
|
|
return d.Unmount(flags)
|
2015-04-19 23:25:52 +02:00
|
|
|
}
|
|
|
|
|
2018-11-02 11:15:55 +01:00
|
|
|
// Hold - Adds a single reference, named with the tag argument, to the snapshot.
// Each snapshot has its own tag namespace, and tags must be unique within that space.
// d must be a snapshot ("parent@snap"); the hold is issued through the
// parent dataset's handle.
func (d *Dataset) Hold(flag string) (err error) {
	var path string
	var pd Dataset
	if path, err = d.Path(); err != nil {
		return
	}
	if !strings.Contains(path, "@") {
		err = fmt.Errorf("'%s' is not a snapshot", path)
		return
	}
	// Open the parent dataset (everything before '@').
	pd, err = DatasetOpenSingle(path[:strings.Index(path, "@")])
	if err != nil {
		return
	}
	defer pd.Close()
	csSnapName := C.CString(path[strings.Index(path, "@")+1:])
	defer C.free(unsafe.Pointer(csSnapName))
	csFlag := C.CString(flag)
	defer C.free(unsafe.Pointer(csFlag))
	if 0 != C.zfs_hold(pd.list.zh, csSnapName, csFlag, booleanT(false), -1) {
		err = LastError()
	}
	return
}
|
|
|
|
|
|
|
|
// Release - Removes a single reference, named with the tag argument, from the specified snapshot.
// The tag must already exist for each snapshot. If a hold exists on a snapshot, attempts to destroy
// that snapshot by using the zfs destroy command return EBUSY.
// d must be a snapshot ("parent@snap"); the release is issued through the
// parent dataset's handle. Mirrors Hold.
func (d *Dataset) Release(flag string) (err error) {
	var path string
	var pd Dataset
	if path, err = d.Path(); err != nil {
		return
	}
	if !strings.Contains(path, "@") {
		err = fmt.Errorf("'%s' is not a snapshot", path)
		return
	}
	// Open the parent dataset (everything before '@').
	pd, err = DatasetOpenSingle(path[:strings.Index(path, "@")])
	if err != nil {
		return
	}
	defer pd.Close()
	csSnapName := C.CString(path[strings.Index(path, "@")+1:])
	defer C.free(unsafe.Pointer(csSnapName))
	csFlag := C.CString(flag)
	defer C.free(unsafe.Pointer(csFlag))
	if 0 != C.zfs_release(pd.list.zh, csSnapName, csFlag, booleanT(false)) {
		err = LastError()
	}
	return
}
|
|
|
|
|
|
|
|
// Holds - Lists all existing user references for the given snapshot.
// d must be a snapshot ("name@snap").
func (d *Dataset) Holds() (tags []HoldTag, err error) {
	var nvl *C.nvlist_t
	var nvp *C.nvpair_t
	var tu64 C.uint64_t
	var path string
	if path, err = d.Path(); err != nil {
		return
	}
	if !strings.Contains(path, "@") {
		err = fmt.Errorf("'%s' is not a snapshot", path)
		return
	}
	if 0 != C.zfs_get_holds(d.list.zh, &nvl) {
		err = LastError()
		return
	}
	defer C.nvlist_free(nvl)
	tags = make([]HoldTag, 0, 5)
	// Walk the nvlist pairs: name is the hold tag, value is the creation
	// time as a uint64 Unix timestamp.
	for nvp = C.nvlist_next_nvpair(nvl, nvp); nvp != nil; {
		tag := C.nvpair_name(nvp)
		C.nvpair_value_uint64(nvp, &tu64)
		tags = append(tags, HoldTag{
			Name:      C.GoString(tag),
			Timestamp: time.Unix(int64(tu64), 0),
		})

		nvp = C.nvlist_next_nvpair(nvl, nvp)
	}
	return
}
|
|
|
|
|
2015-12-04 23:05:19 +01:00
|
|
|
// DatasetPropertyToName convert property to name
|
2015-04-19 23:25:52 +02:00
|
|
|
// ( returns built in string representation of property name).
|
|
|
|
// This is optional, you can represent each property with string
|
|
|
|
// name of choice.
|
2015-12-04 23:05:19 +01:00
|
|
|
func DatasetPropertyToName(p Prop) (name string) {
|
|
|
|
if p == DatasetNumProps {
|
2015-04-19 23:25:52 +02:00
|
|
|
return "numofprops"
|
|
|
|
}
|
|
|
|
prop := C.zfs_prop_t(p)
|
|
|
|
name = C.GoString(C.zfs_prop_to_name(prop))
|
|
|
|
return
|
|
|
|
}
|
2018-12-17 14:01:59 +01:00
|
|
|
|
|
|
|
// DestroyPromote - Same as DestroyRecursive() except it will not destroy
// any dependent clones, but promote them first.
// This function will navigate any dependency chain
// of cloned datasets using breadth first search to promote according and let
// you remove dataset regardless of its cloned dependencies.
// Note: that this function won't work when you want to destroy snapshot this way.
// However it will destroy all snapshots of destroyed dataset without dependencies,
// otherwise snapshot will move to promoted clone
func (d *Dataset) DestroyPromote() (err error) {
	var snaps []Dataset
	var clones []string
	// We need to save list of child snapshots, to destroy them later
	// since they will be moved to promoted clone
	var psnaps []string
	if clones, err = d.Clones(); err != nil {
		return
	}
	if len(clones) > 0 {
		var cds Dataset
		// For this to always work we need to promote youngest clone
		// in terms of most recent origin snapshot or creation time if
		// cloned from same snapshot (Clones() returns them sorted that way)
		if cds, err = DatasetOpen(clones[0]); err != nil {
			return
		}
		defer cds.Close()
		// since promote will move the snapshots to promoted dataset
		// we need to check and resolve possible name conflicts
		if snaps, err = d.Snapshots(); err != nil {
			return
		}
		for _, s := range snaps {
			spath := s.Properties[DatasetPropName].Value
			sname := spath[strings.Index(spath, "@"):]
			// conflict and resolve
			if ok, _ := cds.FindSnapshotName(sname); ok {
				// snapshot with the same name already exist:
				// suffix ours with the volume base name
				volname := path.Base(spath[:strings.Index(spath, "@")])
				sname = sname + "." + volname
				if err = s.Rename(spath+"."+volname, false, true); err != nil {
					return
				}
			}
			psnaps = append(psnaps, sname)
		}
		if err = cds.Promote(); err != nil {
			return
		}
	}
	// destroy child datasets, since this works recursive
	for _, cd := range d.Children {
		if err = cd.DestroyPromote(); err != nil {
			return
		}
	}
	d.Children = make([]Dataset, 0)
	if err = d.Destroy(false); err != nil {
		return
	}
	// Load with new promoted snapshots
	if len(clones) > 0 && len(psnaps) > 0 {
		var cds Dataset
		if cds, err = DatasetOpen(clones[0]); err != nil {
			return
		}
		defer cds.Close()
		// try to destroy (promoted) snapshots now; failures are ignored
		for _, sname := range psnaps {
			if ok, snap := cds.FindSnapshotName(sname); ok {
				snap.Destroy(false)
			}
		}
	}
	return
}
|
|
|
|
|
|
|
|
// Snapshots - filter and return all snapshots of dataset
|
|
|
|
func (d *Dataset) Snapshots() (snaps []Dataset, err error) {
|
|
|
|
for _, ch := range d.Children {
|
|
|
|
if !ch.IsSnapshot() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
snaps = append(snaps, ch)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// FindSnapshot - returns true if given path is one of dataset snaphsots
|
|
|
|
func (d *Dataset) FindSnapshot(path string) (ok bool, snap Dataset) {
|
|
|
|
for _, ch := range d.Children {
|
|
|
|
if !ch.IsSnapshot() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if ok = (path == ch.Properties[DatasetPropName].Value); ok {
|
|
|
|
snap = ch
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// FindSnapshotName - returns true and snapshot if given snapshot
|
|
|
|
// name eg. '@snap1' is one of dataset snaphsots
|
|
|
|
func (d *Dataset) FindSnapshotName(name string) (ok bool, snap Dataset) {
|
|
|
|
return d.FindSnapshot(d.Properties[DatasetPropName].Value + name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Clones - get list of all dataset paths cloned from this
// dataset or this snapshot
// List is sorted descedent by origin snapshot order
func (d *Dataset) Clones() (clones []string, err error) {
	// Clones can only live on same pool
	var root Dataset
	var sortDesc []Dataset
	if root, err = DatasetOpen(d.PoolName()); err != nil {
		return
	}
	defer root.Close()
	dIsSnapshot := d.IsSnapshot()
	// Use breadth first search to find all clones
	// NOTE(review): the queue is capped at 1024; a pool with more pending
	// datasets than that would block the send below — confirm bound.
	queue := make(chan Dataset, 1024)
	defer close(queue) // This will close and cleanup all
	queue <- root      // start from the root element
	for {
		select {
		case ds := <-queue: // pull from queue (breadth first search)
			for _, ch := range ds.Children {
				origin := ch.Properties[DatasetPropOrigin].Value
				if len(origin) > 0 {
					if dIsSnapshot && origin == d.Properties[DatasetPropName].Value {
						// if this dataset is snapshot, match by origin directly;
						// stash origin createtxg under an out-of-range key for sorting
						ch.Properties[DatasetNumProps+1000] = d.Properties[DatasetPropCreateTXG]
						sortDesc = append(sortDesc, ch)
					} else {
						// Check if origin of this dataset is one of snapshots
						ok, snap := d.FindSnapshot(origin)
						if !ok {
							continue
						}
						ch.Properties[DatasetNumProps+1000] = snap.Properties[DatasetPropCreateTXG]
						sortDesc = append(sortDesc, ch)
					}
				}
				queue <- ch
			}
		default:
			// Queue drained: sort and emit the clone names.
			sort.Sort(clonesCreateDesc(sortDesc))
			// This way we get clones ordered from most recent snapshots first
			for _, c := range sortDesc {
				clones = append(clones, c.Properties[DatasetPropName].Value)
			}
			return
		}
	}
	return
}
|