- Prevent double-free failure when the same dataset is closed multiple times

In some cases, copying a dataset object — either by passing it by value
or through some other pattern — can lead to code that closes the same
dataset multiple times (e.g. via defer), or otherwise to code with
complicated handling of dataset cleanup.
Fixes: #30, Closes: #31
This commit is contained in:
Faruk Kasumovic 2020-09-09 10:22:37 +02:00
parent 768fbf7f22
commit 039b070d1a
5 changed files with 62 additions and 13 deletions

View File

@ -34,6 +34,7 @@ func Test(t *testing.T) {
zfsTestDatasetSetProperty(t) zfsTestDatasetSetProperty(t)
zfsTestDatasetHoldRelease(t) zfsTestDatasetHoldRelease(t)
zfsTestDoubleFreeOnDestroy(t)
zfsTestDatasetDestroy(t) zfsTestDatasetDestroy(t)
zpoolTestPoolDestroy(t) zpoolTestPoolDestroy(t)

View File

@ -60,4 +60,5 @@ func TestDataset_DestroyPromote(t *testing.T) {
t.Log("Destroy promote completed with success") t.Log("Destroy promote completed with success")
d.Close() d.Close()
zpoolTestPoolDestroy(t) zpoolTestPoolDestroy(t)
cleanupVDisks()
} }

29
zfs.go
View File

@ -13,6 +13,7 @@ import (
"path" "path"
"sort" "sort"
"strings" "strings"
"sync"
"time" "time"
"unsafe" "unsafe"
) )
@ -49,6 +50,7 @@ type HoldTag struct {
// Dataset - ZFS dataset object // Dataset - ZFS dataset object
type Dataset struct { type Dataset struct {
list C.dataset_list_ptr list C.dataset_list_ptr
closeOnce *sync.Once
Type DatasetType Type DatasetType
Properties map[Prop]Property Properties map[Prop]Property
Children []Dataset Children []Dataset
@ -58,7 +60,7 @@ func (d *Dataset) openChildren() (err error) {
d.Children = make([]Dataset, 0, 5) d.Children = make([]Dataset, 0, 5)
list := C.dataset_list_children(d.list) list := C.dataset_list_children(d.list)
for list != nil { for list != nil {
dataset := Dataset{list: list} dataset := Dataset{list: list, closeOnce: new(sync.Once)}
dataset.Type = DatasetType(C.dataset_type(list)) dataset.Type = DatasetType(C.dataset_type(list))
dataset.Properties = make(map[Prop]Property) dataset.Properties = make(map[Prop]Property)
err = dataset.ReloadProperties() err = dataset.ReloadProperties()
@ -79,16 +81,20 @@ func (d *Dataset) openChildren() (err error) {
// DatasetOpenAll recursive get handles to all available datasets on system // DatasetOpenAll recursive get handles to all available datasets on system
// (file-systems, volumes or snapshots). // (file-systems, volumes or snapshots).
func DatasetOpenAll() (datasets []Dataset, err error) { func DatasetOpenAll() (datasets []Dataset, err error) {
var dataset Dataset list := C.dataset_list_root()
dataset.list = C.dataset_list_root() for list != nil {
for dataset.list != nil { dataset := Dataset{
dataset.Type = DatasetType(C.dataset_type(dataset.list)) list: list,
closeOnce: new(sync.Once),
Type: DatasetType(C.dataset_type(list)),
}
dataset.Type = DatasetType(C.dataset_type(list))
err = dataset.ReloadProperties() err = dataset.ReloadProperties()
if err != nil { if err != nil {
return return
} }
datasets = append(datasets, dataset) datasets = append(datasets, dataset)
dataset.list = C.dataset_next(dataset.list) list = C.dataset_next(list)
} }
for ci := range datasets { for ci := range datasets {
if err = datasets[ci].openChildren(); err != nil { if err = datasets[ci].openChildren(); err != nil {
@ -130,6 +136,7 @@ func DatasetOpenSingle(path string) (d Dataset, err error) {
err = fmt.Errorf("%s - %s", err.Error(), path) err = fmt.Errorf("%s - %s", err.Error(), path)
return return
} }
d.closeOnce = new(sync.Once)
d.Type = DatasetType(C.dataset_type(d.list)) d.Type = DatasetType(C.dataset_type(d.list))
err = d.ReloadProperties() err = d.ReloadProperties()
if err != nil { if err != nil {
@ -182,11 +189,13 @@ func DatasetCreate(path string, dtype DatasetType,
// Close close dataset and all its recursive children datasets (close handle // Close close dataset and all its recursive children datasets (close handle
// and cleanup dataset object/s from memory) // and cleanup dataset object/s from memory)
func (d *Dataset) Close() { func (d *Dataset) Close() {
// path, _ := d.Path() // if dataset was ever open
Global.Mtx.Lock() if d.closeOnce != nil {
C.dataset_list_close(d.list) d.closeOnce.Do(func() {
C.dataset_list_close(d.list)
})
}
d.list = nil d.list = nil
Global.Mtx.Unlock()
for _, cd := range d.Children { for _, cd := range d.Children {
cd.Close() cd.Close()
} }

View File

@ -338,3 +338,35 @@ func ExampleDatasetOpenAll() {
} }
} }
// CopyAndDestroy destroys the given dataset and, on success, closes its
// handle. Callers may still invoke Close again afterwards (e.g. from a
// defer); the sync.Once guard in Dataset.Close makes that second close a
// no-op instead of a double free.
func CopyAndDestroy(d *zfs.Dataset) error {
	err := d.Destroy(false)
	if err != nil {
		return err
	}
	d.Close()
	return nil
}
// zfsTestDoubleFreeOnDestroy verifies that a dataset handle can be closed
// again after it has been destroyed and closed once already (here via the
// deferred Close that fires after CopyAndDestroy) without a double free.
func zfsTestDoubleFreeOnDestroy(t *testing.T) {
	TSTDestroyPath := TSTPoolName + "/DESTROY"
	// BUGFIX: log the path this test actually uses (was TSTVolumePath)
	// and correct the "Doble" typo.
	println("TEST Double Free On Destroy( ", TSTDestroyPath, " ) ... ")
	props := make(map[zfs.Prop]zfs.Property)
	d, err := zfs.DatasetCreate(TSTDestroyPath, zfs.DatasetTypeFilesystem, props)
	if err != nil {
		t.Error(err)
		return
	}
	d.Close()
	d, err = zfs.DatasetOpen(TSTDestroyPath)
	if err != nil {
		t.Error(err)
		return
	}
	// This deferred Close runs after CopyAndDestroy has already closed the
	// dataset; the sync.Once guard makes it safe.
	defer d.Close()
	if err = CopyAndDestroy(&d); err != nil {
		t.Error(err)
		return
	}
	print("PASS\n\n")
}

View File

@ -51,12 +51,18 @@ func createTestpoolVdisks() (err error) {
return return
} }
func removeVDisk(path string) {
if err := os.Remove(path); err != nil {
println("Error: ", err.Error())
}
}
// Cleanup sparse files used for tests // Cleanup sparse files used for tests
func cleanupVDisks() { func cleanupVDisks() {
// try cleanup // try cleanup
os.Remove(s1path) removeVDisk(s1path)
os.Remove(s2path) removeVDisk(s2path)
os.Remove(s3path) removeVDisk(s3path)
} }
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */