DestroyPromote - recursively destroy a dataset, but don’t destroy any dependent clones; promote them first

- Clones get list of clones (paths) made from given dataset or snapshot
 - Get list of snapshots
 - Find snapshot (search for snapshot by name in children list)
 - IsSnapshot change signature (for simplification; hope it won’t break a lot of code)
This commit is contained in:
Faruk Kasumovic 2018-12-17 14:01:59 +01:00
parent 7482756d85
commit 57e42e824f
3 changed files with 292 additions and 6 deletions

63
destroy_test.go Normal file
View File

@ -0,0 +1,63 @@
package zfs_test
import (
"testing"
zfs "github.com/bicomsystems/go-libzfs"
)
// TestDataset_DestroyPromote creates a dataset with two snapshots, clones
// each snapshot, then verifies DestroyPromote removes the original dataset
// by promoting the dependent clones instead of failing on them.
func TestDataset_DestroyPromote(t *testing.T) {
	zpoolTestPoolCreate(t)
	// defer zpoolTestPoolDestroy(t)
	var c1, c2 zfs.Dataset
	props := make(map[zfs.Prop]zfs.Property)
	d, err := zfs.DatasetCreate(TSTPoolName+"/original",
		zfs.DatasetTypeFilesystem, make(map[zfs.Prop]zfs.Property))
	if err != nil {
		t.Errorf("DatasetCreate(\"%s/original\") error: %v", TSTPoolName, err)
		return
	}
	s1, _ := zfs.DatasetSnapshot(d.Properties[zfs.DatasetPropName].Value+"@snap2", false, props)
	s2, _ := zfs.DatasetSnapshot(d.Properties[zfs.DatasetPropName].Value+"@snap1", false, props)
	c1, err = s1.Clone(TSTPoolName+"/clone1", nil)
	if err != nil {
		// was: "d.Clone(...clone1...)" - the failing call is s1.Clone
		t.Errorf("s1.Clone(\"%s/clone1\", props) error: %v", TSTPoolName, err)
		s1.Close()
		s2.Close()
		d.Close()
		return
	}
	s1.Close() // no longer needed once the clone exists
	zfs.DatasetSnapshot(c1.Properties[zfs.DatasetPropName].Value+"@snap1", false, props)
	c2, err = s2.Clone(TSTPoolName+"/clone2", nil)
	if err != nil {
		// was: "c1.Clone(...clone1...)" - the failing call is s2.Clone of clone2
		t.Errorf("s2.Clone(\"%s/clone2\", props) error: %v", TSTPoolName, err)
		s2.Close()
		d.Close()
		c1.Close()
		return
	}
	s2.Close()
	zfs.DatasetSnapshot(c2.Properties[zfs.DatasetPropName].Value+"@snap0", false, props)
	c1.Close()
	c2.Close()
	// reopen pool
	d.Close()
	if d, err = zfs.DatasetOpen(TSTPoolName + "/original"); err != nil {
		t.Error("zfs.DatasetOpen")
		return
	}
	if err = d.DestroyPromote(); err != nil {
		t.Errorf("DestroyPromote error: %v", err)
		d.Close()
		return
	}
	t.Log("Destroy promote completed with success")
	d.Close()
	zpoolTestPoolDestroy(t)
}

50
sort.go Normal file
View File

@ -0,0 +1,50 @@
package zfs
import (
"strconv"
)
type clonesCreateDesc []Dataset
// Less orders clones newest-first by the createtxg of their origin snapshot,
// which Clones() stashes under the DatasetNumProps+1000 pseudo-property.
// When both origins have the same createtxg (clones of the same snapshot),
// it falls back to the clone's own createtxg, again newest-first.
func (list clonesCreateDesc) Less(i, j int) bool {
	_, oki := list[i].Properties[DatasetNumProps+1000]
	// BUG FIX: okj was computed from list[i], so element j's pseudo-property
	// presence was never actually checked.
	_, okj := list[j].Properties[DatasetNumProps+1000]
	if oki && okj {
		unixti, err := strconv.ParseInt(
			list[i].Properties[DatasetNumProps+1000].Value, 10, 64)
		if err != nil {
			panic(err)
		}
		unixtj, err := strconv.ParseInt(
			list[j].Properties[DatasetNumProps+1000].Value, 10, 64)
		if err != nil {
			panic(err)
		}
		if unixti != unixtj {
			return unixti > unixtj
		}
	}
	// if we have two datasets created from the same snapshot
	// any of them will do, but we will go for the most recent
	unixti, err := strconv.ParseInt(
		list[i].Properties[DatasetPropCreateTXG].Value, 10, 64)
	if err != nil {
		panic(err)
	}
	unixtj, err := strconv.ParseInt(
		list[j].Properties[DatasetPropCreateTXG].Value, 10, 64)
	if err != nil {
		panic(err)
	}
	return unixti > unixtj
}
// Swap exchanges the datasets at indexes i and j (sort.Interface).
func (list clonesCreateDesc) Swap(i, j int) {
	tmp := list[i]
	list[i] = list[j]
	list[j] = tmp
}
// Len reports the number of datasets in the list (sort.Interface).
func (list clonesCreateDesc) Len() int {
	count := len(list)
	return count
}

185
zfs.go
View File

@ -10,6 +10,8 @@ import "C"
import (
"errors"
"fmt"
"path"
"sort"
"strings"
"time"
"unsafe"
@ -191,6 +193,13 @@ func (d *Dataset) Close() {
}
}
// reOpen - close and open dataset. Not thread safe!
func (d *Dataset) reOpen() (err error) {
	// Capture the name before Close() so we never read the properties
	// map of an already-closed handle.
	name := d.Properties[DatasetPropName].Value
	d.Close()
	*d, err = DatasetOpen(name)
	return
}
// Destroy destroys the dataset. The caller must make sure that the filesystem
// isn't mounted, and that there are no active dependents. Set Defer argument
// to true to defer destruction for when dataset is not in use. Call Close() to
@ -220,12 +229,9 @@ func (d *Dataset) Destroy(Defer bool) (err error) {
}
// IsSnapshot - return true if dataset is a snapshot
func (d *Dataset) IsSnapshot() (ok bool, err error) {
var path string
if path, err = d.Path(); err != nil {
return
}
ok = d.Type == DatasetTypeSnapshot || strings.Contains(path, "@")
// IsSnapshot - return true if this dataset is a snapshot.
func (d *Dataset) IsSnapshot() (ok bool) {
	// "name" instead of "path": avoids shadowing the imported path package
	name := d.Properties[DatasetPropName].Value
	return d.Type == DatasetTypeSnapshot || strings.Contains(name, "@")
}
@ -294,6 +300,16 @@ func (d *Dataset) Pool() (p Pool, err error) {
return
}
// PoolName - return name of the pool this dataset lives on,
// i.e. everything before the first "/" of its full name.
func (d *Dataset) PoolName() string {
	name := d.Properties[DatasetPropName].Value
	if i := strings.Index(name, "/"); i >= 0 {
		return name[:i]
	}
	// no "/" present: the dataset is the pool root itself
	return name
}
// ReloadProperties re-read dataset's properties
func (d *Dataset) ReloadProperties() (err error) {
if d.list == nil {
@ -652,3 +668,160 @@ func DatasetPropertyToName(p Prop) (name string) {
name = C.GoString(C.zfs_prop_to_name(prop))
return
}
// DestroyPromote - Same as DestroyRecursive() except it will not destroy
// any dependent clones; it promotes them first.
// The dependency chain of cloned datasets is walked recursively, promoting
// clones as needed, so the dataset can be removed regardless of its cloned
// dependencies.
// Note: this function won't work when the target itself is a snapshot.
// Snapshots of the destroyed dataset without dependencies are destroyed;
// snapshots with dependent clones move to the promoted clone and are
// destroyed there afterwards (best effort).
func (d *Dataset) DestroyPromote() (err error) {
	var snaps []Dataset
	var clones []string
	// We need to save the list of child snapshots, to destroy them later,
	// since promotion will move them to the promoted clone
	var psnaps []string
	if clones, err = d.Clones(); err != nil {
		return
	}
	if len(clones) > 0 {
		var cds Dataset
		// For this to always work we need to promote the youngest clone
		// in terms of most recent origin snapshot, or creation time if
		// cloned from the same snapshot. Clones() returns the list sorted
		// that way, so clones[0] is the one to promote.
		if cds, err = DatasetOpen(clones[0]); err != nil {
			return
		}
		defer cds.Close()
		// since promote will move the snapshots to the promoted dataset
		// we need to check and resolve possible name conflicts
		if snaps, err = d.Snapshots(); err != nil {
			return
		}
		for _, s := range snaps {
			spath := s.Properties[DatasetPropName].Value
			sname := spath[strings.Index(spath, "@"):]
			// conflict: the clone already has a snapshot with this name;
			// resolve by appending the source dataset's base name
			if ok, _ := cds.FindSnapshotName(sname); ok {
				// snapshot with the same name already exists on the clone
				volname := path.Base(spath[:strings.Index(spath, "@")])
				sname = sname + "." + volname
				if err = s.Rename(spath+"."+volname, false, true); err != nil {
					return
				}
			}
			psnaps = append(psnaps, sname)
		}
		if err = cds.Promote(); err != nil {
			return
		}
	}
	// destroy child datasets first - this recurses depth-first
	for _, cd := range d.Children {
		if err = cd.DestroyPromote(); err != nil {
			return
		}
	}
	d.Children = make([]Dataset, 0)
	if err = d.Destroy(false); err != nil {
		return
	}
	// Reopen the promoted clone, which now owns the moved snapshots
	if len(clones) > 0 && len(psnaps) > 0 {
		var cds Dataset
		if cds, err = DatasetOpen(clones[0]); err != nil {
			return
		}
		defer cds.Close()
		// try to destroy (promoted) snapshots now; best effort, a snapshot
		// still in use is deliberately left in place (error ignored)
		for _, sname := range psnaps {
			if ok, snap := cds.FindSnapshotName(sname); ok {
				snap.Destroy(false)
			}
		}
	}
	return
}
// Snapshots - filter the direct children and return only those
// that are snapshots of this dataset.
func (d *Dataset) Snapshots() (snaps []Dataset, err error) {
	for _, child := range d.Children {
		if child.IsSnapshot() {
			snaps = append(snaps, child)
		}
	}
	return
}
// FindSnapshot - returns true and the matching child dataset if the
// given full path names one of this dataset's snapshots.
func (d *Dataset) FindSnapshot(path string) (ok bool, snap Dataset) {
	for _, child := range d.Children {
		if !child.IsSnapshot() {
			continue
		}
		if child.Properties[DatasetPropName].Value == path {
			return true, child
		}
	}
	return
}
// FindSnapshotName - returns true and the snapshot if the given short
// snapshot name (e.g. "@snap1") belongs to this dataset.
func (d *Dataset) FindSnapshotName(name string) (ok bool, snap Dataset) {
	fullPath := d.Properties[DatasetPropName].Value + name
	return d.FindSnapshot(fullPath)
}
// Clones - get list of all dataset paths cloned from this
// dataset or this snapshot.
// The list is sorted descending by origin snapshot order: clones of the
// most recent origin snapshot come first, ties broken by the clone's own
// createtxg (see clonesCreateDesc.Less).
func (d *Dataset) Clones() (clones []string, err error) {
	// Clones can only live on the same pool, so search from its root
	var root Dataset
	var sortDesc []Dataset
	if root, err = DatasetOpen(d.PoolName()); err != nil {
		return
	}
	defer root.Close()
	dIsSnapshot := d.IsSnapshot()
	// Use breadth first search to find all clones.
	// BUG FIX: the queue used to be a channel with capacity 1024; pools
	// with more queued datasets would deadlock on send. A slice-backed
	// FIFO keeps the same traversal order with no capacity limit.
	queue := []Dataset{root}
	for len(queue) > 0 {
		ds := queue[0] // pull from queue (breadth first search)
		queue = queue[1:]
		for _, ch := range ds.Children {
			origin := ch.Properties[DatasetPropOrigin].Value
			if len(origin) > 0 {
				if dIsSnapshot && origin == d.Properties[DatasetPropName].Value {
					// d itself is the origin snapshot of this child;
					// stash its createtxg in a pseudo-property as sort key
					ch.Properties[DatasetNumProps+1000] = d.Properties[DatasetPropCreateTXG]
					sortDesc = append(sortDesc, ch)
				} else if ok, snap := d.FindSnapshot(origin); ok {
					// origin is one of d's snapshots; use that snapshot's
					// createtxg as the sort key
					ch.Properties[DatasetNumProps+1000] = snap.Properties[DatasetPropCreateTXG]
					sortDesc = append(sortDesc, ch)
				}
			}
			queue = append(queue, ch)
		}
	}
	sort.Sort(clonesCreateDesc(sortDesc))
	// This way we get clones ordered from most recent snapshots first
	for _, c := range sortDesc {
		clones = append(clones, c.Properties[DatasetPropName].Value)
	}
	return
}