From e67132bb04baf9453ef7ab0c3feea2cc48240bb3 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Sun, 19 Apr 2015 23:33:10 +0200 Subject: [PATCH 01/36] - Added list of main features to README, and small aperance improvements Make LICENSE more easy to read on GitHub --- .gitignore | 1 + LICENSE | 26 +++++++++++++++++++++----- README.md | 19 ++++++++++++++----- 3 files changed, 36 insertions(+), 10 deletions(-) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5ef4730 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.gitconfig diff --git a/LICENSE b/LICENSE index a557761..dfc205d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,12 +1,28 @@ Copyright (c) 2015, Faruk Kasumovic All rights reserved. -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +3. 
Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md index d83a88f..c4ca12b 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,30 @@ # Introduction -Currently implements basic manipulation of ZFS pools and data sets. 
Plan is to add more in further development, improve documentation with more examples, and add more tests. _go-libzfs_ use libzfs C library and does not wrap OpenZFS CLI tools. That way it ensure best performance. Per my personal opinion its more reliable way to do it, and that libzfs is less subject of possible changes then CLI tools. Goal is to let easy using and manipulating OpenZFS form with in go, and tries to map libzfs C library in to go style package respecting golang common practice. +**go-libzfs** currently implements basic manipulation of ZFS pools and data sets. Plan is to add more in further development, improve documentation with more examples, and add more tests. _go-libzfs_ use libzfs C library and does not wrap OpenZFS CLI tools. That way it ensure best performance. Per my personal opinion its more reliable way to do it, and that libzfs is less subject of possible changes then CLI tools. Goal is to let easy using and manipulating OpenZFS form with in go, and tries to map libzfs C library in to go style package respecting golang common practice. [![GoDoc](https://godoc.org/github.com/fkasumovic/go-libzfs?status.svg)](https://godoc.org/github.com/fkasumovic/go-libzfs) -# Requirements: +## Main features + +- Creating, destroying, importing and exporting pools. +- Reading and modifying pool properties. +- Creating, destroying and renaming of filesystem datasets and volumes. +- Creating, destroying and rollback of snapshots. +- Cloning datasets and volumes. +- Reading and modifying dataset and volume properties. + +## Requirements: - OpenZFS and libzfs with development headers installed. - Developed using go1.4.2 -# Installing +## Installing ```sh go get github.com/fkasumovic/go-libzfs ``` -# Testing +## Testing ```sh # On command line shell run @@ -23,7 +32,7 @@ cd $GOPATH/src/github.com/fkasumovic/go-libzfs go test ``` -# Usage example +## Usage example ```go // Create map to represent ZFS dataset properties. 
This is equivalent to From 5405c7568a62a80304f3364abf1faa29846ba92e Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 20 Apr 2015 10:49:01 +0200 Subject: [PATCH 02/36] - Update link to godoc in README to reflect change of project location. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c4ca12b..ba8fe33 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ **go-libzfs** currently implements basic manipulation of ZFS pools and data sets. Plan is to add more in further development, improve documentation with more examples, and add more tests. _go-libzfs_ use libzfs C library and does not wrap OpenZFS CLI tools. That way it ensure best performance. Per my personal opinion its more reliable way to do it, and that libzfs is less subject of possible changes then CLI tools. Goal is to let easy using and manipulating OpenZFS form with in go, and tries to map libzfs C library in to go style package respecting golang common practice. 
-[![GoDoc](https://godoc.org/github.com/fkasumovic/go-libzfs?status.svg)](https://godoc.org/github.com/fkasumovic/go-libzfs) +[![GoDoc](https://godoc.org/github.com/bicomsystems/go-libzfs?status.svg)](https://godoc.org/github.com/bicomsystems/go-libzfs) ## Main features From fe306ffc2838c88b15f00b8196adc34b584eefa1 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 20 Apr 2015 10:55:04 +0200 Subject: [PATCH 03/36] - More updates to README to reflect change of project location --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ba8fe33..adea694 100644 --- a/README.md +++ b/README.md @@ -21,14 +21,14 @@ ## Installing ```sh -go get github.com/fkasumovic/go-libzfs +go get github.com/bicomsystems/go-libzfs ``` ## Testing ```sh # On command line shell run -cd $GOPATH/src/github.com/fkasumovic/go-libzfs +cd $GOPATH/src/github.com/bicomsystems/go-libzfs go test ``` From 65eb260be4a64ffe5c2e84f0196f65179b6e33fa Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 7 May 2015 22:59:44 +0200 Subject: [PATCH 04/36] - Make dataset destroy returns meaningful error if dataset contains children - Implementation of recursive dataset destroy DestroyRecursive() --- .gitignore | 1 + zfs.go | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 5ef4730..cf468c9 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .gitconfig +*.sublime-* diff --git a/zfs.go b/zfs.go index e19c67b..6b07fba 100644 --- a/zfs.go +++ b/zfs.go @@ -156,7 +156,20 @@ func (d *Dataset) Close() { } } +// Destroys the dataset. The caller must make sure that the filesystem +// isn't mounted, and that there are no active dependents. Set Defer argument +// to true to defer destruction for when dataset is not in use. 
func (d *Dataset) Destroy(Defer bool) (err error) { + if len(d.Children) > 0 { + path, e := d.Path() + if e != nil { + return + } + dsType, e := d.GetProperty(ZFSPropType) + err = errors.New("Cannot destroy dataset " + path + + ": " + dsType.Value + " has children") + return + } if d.list != nil { if ec := C.zfs_destroy(d.list.zh, boolean_t(Defer)); ec != 0 { err = LastError() @@ -167,6 +180,23 @@ func (d *Dataset) Destroy(Defer bool) (err error) { return } +// Recursively destroy children of dataset and dataset. +func (d *Dataset) DestroyRecursive() (err error) { + if len(d.Children) > 0 { + for _, c := range d.Children { + if err = c.DestroyRecursive(); err != nil { + return + } + // close handle to destroyed child dataset + c.Close() + } + // clear closed children array + d.Children = make([]Dataset, 0) + } + err = d.Destroy(false) + return +} + func (d *Dataset) Pool() (p Pool, err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) @@ -259,7 +289,7 @@ func (d *Dataset) Clone(target string, props map[ZFSProp]Property) (rd Dataset, return } -// Create dataset snapshot +// Create dataset snapshot. Set recur to true to snapshot child datasets. func DatasetSnapshot(path string, recur bool, props map[ZFSProp]Property) (rd Dataset, err error) { var cprops *C.nvlist_t if cprops, err = datasetPropertiesTo_nvlist(props); err != nil { @@ -285,6 +315,7 @@ func (d *Dataset) Path() (path string, err error) { return } +// Rollabck dataset snapshot func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) @@ -297,6 +328,7 @@ func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { return } +// Rename dataset func (d *Dataset) Rename(newname string, recur, force_umount bool) (err error) { if d.list == nil { From c0b415fe50448d46116a4895112e49d46b96336a Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Wed, 3 Jun 2015 22:42:48 +0200 Subject: [PATCH 05/36] - Improved existing tests. 
Test implementation is separated per datasets and pools to avoid one big complex file. Tests are dependent so order of execution is forced by adding a_test.go running test with sub tests in required order. --- a_test.go | 23 +++++ zfs_test.go | 171 +++++++++++++++++++++++++++++++++ zpool_test.go | 261 +++++++++++++------------------------------------- 3 files changed, 263 insertions(+), 192 deletions(-) create mode 100644 a_test.go create mode 100644 zfs_test.go diff --git a/a_test.go b/a_test.go new file mode 100644 index 0000000..fae4a3c --- /dev/null +++ b/a_test.go @@ -0,0 +1,23 @@ +package zfs_test + +import ( + "testing" +) + +/* ------------------------------------------------------------------------- */ +// TESTS ARE DEPENDED AND MUST RUN IN DEPENDENT ORDER + +func Test(t *testing.T) { + zpoolTestPoolCreate(t) + zpoolTestPoolOpenAll(t) + zpoolTestFailPoolOpen(t) + + zfsTestDatasetCreate(t) + zfsTestDatasetOpen(t) + zfsTestDatasetSnapshot(t) + zfsTestDatasetOpenAll(t) + + zfsTestDatasetDestroy(t) + + zpoolTestPoolDestroy(t) +} diff --git a/zfs_test.go b/zfs_test.go new file mode 100644 index 0000000..5f82b3a --- /dev/null +++ b/zfs_test.go @@ -0,0 +1,171 @@ +package zfs_test + +import ( + "fmt" + "github.com/bicomsystems/go-libzfs" + "testing" +) + +/* ------------------------------------------------------------------------- */ +// HELPERS: +var TST_DATASET_PATH = TST_POOL_NAME + "/DATASET" +var TST_VOLUME_PATH = TST_DATASET_PATH + "/VOLUME" +var TST_DATASET_PATH_SNAP = TST_DATASET_PATH + "@test" + +func printDatasets(ds []zfs.Dataset) error { + for _, d := range ds { + + path, err := d.Path() + if err != nil { + return err + } + p, err := d.GetProperty(zfs.ZFSPropType) + if err != nil { + return err + } + fmt.Printf(" %30s | %10s\n", path, + p.Value) + if len(d.Children) > 0 { + printDatasets(d.Children) + } + } + return nil +} + +/* ------------------------------------------------------------------------- */ +// TESTS: + +func zfsTestDatasetCreate(t 
*testing.T) { + // reinit names used in case TESTPOOL was in conflict + TST_DATASET_PATH = TST_POOL_NAME + "/DATASET" + TST_VOLUME_PATH = TST_DATASET_PATH + "/VOLUME" + TST_DATASET_PATH_SNAP = TST_DATASET_PATH + "@test" + + println("TEST DatasetCreate(", TST_DATASET_PATH, ") (filesystem) ... ") + props := make(map[zfs.ZFSProp]zfs.Property) + d, err := zfs.DatasetCreate(TST_DATASET_PATH, zfs.DatasetTypeFilesystem, props) + if err != nil { + t.Error(err) + return + } + d.Close() + println("PASS\n") + + strSize := "536870912" // 512M + + println("TEST DatasetCreate(", TST_VOLUME_PATH, ") (volume) ... ") + props[zfs.ZFSPropVolsize] = zfs.Property{Value: strSize} + // In addition I explicitly choose some more properties to be set. + props[zfs.ZFSPropVolblocksize] = zfs.Property{Value: "4096"} + props[zfs.ZFSPropReservation] = zfs.Property{Value: strSize} + d, err = zfs.DatasetCreate(TST_VOLUME_PATH, zfs.DatasetTypeVolume, props) + if err != nil { + t.Error(err) + return + } + d.Close() + println("PASS\n") +} + +func zfsTestDatasetOpen(t *testing.T) { + println("TEST DatasetOpen(", TST_DATASET_PATH, ") ... ") + d, err := zfs.DatasetOpen(TST_DATASET_PATH) + if err != nil { + t.Error(err) + return + } + d.Close() + println("PASS\n") +} + +func zfsTestDatasetOpenAll(t *testing.T) { + println("TEST DatasetOpenAll()/DatasetCloseAll() ... ") + ds, err := zfs.DatasetOpenAll() + if err != nil { + t.Error(err) + return + } + if err = printDatasets(ds); err != nil { + zfs.DatasetCloseAll(ds) + t.Error(err) + return + } + zfs.DatasetCloseAll(ds) + println("PASS\n") +} + +func zfsTestDatasetSnapshot(t *testing.T) { + println("TEST DatasetSnapshot(", TST_DATASET_PATH, ", true, ...) ... 
") + props := make(map[zfs.ZFSProp]zfs.Property) + d, err := zfs.DatasetSnapshot(TST_DATASET_PATH_SNAP, true, props) + if err != nil { + t.Error(err) + return + } + defer d.Close() + println("PASS\n") +} + +func zfsTestDatasetDestroy(t *testing.T) { + println("TEST DATASET Destroy( ", TST_DATASET_PATH, " ) ... ") + d, err := zfs.DatasetOpen(TST_DATASET_PATH) + if err != nil { + t.Error(err) + return + } + defer d.Close() + if err = d.DestroyRecursive(); err != nil { + t.Error(err) + return + } + println("PASS\n") +} + +/* ------------------------------------------------------------------------- */ +// EXAMPLES: + +// Example of creating ZFS volume +func ExampleDatasetCreate() { + // Create map to represent ZFS dataset properties. This is equivalent to + // list of properties you can get from ZFS CLI tool, and some more + // internally used by libzfs. + props := make(map[zfs.ZFSProp]zfs.Property) + + // I choose to create (block) volume 1GiB in size. Size is just ZFS dataset + // property and this is done as map of strings. So, You have to either + // specify size as base 10 number in string, or use strconv package or + // similar to convert in to string (base 10) from numeric type. + strSize := "1073741824" + + props[zfs.ZFSPropVolsize] = zfs.Property{Value: strSize} + // In addition I explicitly choose some more properties to be set. 
+ props[zfs.ZFSPropVolblocksize] = zfs.Property{Value: "4096"} + props[zfs.ZFSPropReservation] = zfs.Property{Value: strSize} + + // Lets create desired volume + d, err := zfs.DatasetCreate("TESTPOOL/VOLUME1", zfs.DatasetTypeVolume, props) + if err != nil { + println(err.Error()) + return + } + // Dataset have to be closed for memory cleanup + defer d.Close() + + println("Created zfs volume TESTPOOL/VOLUME1") +} + +func ExampleDatasetOpen() { + // Open dataset and read its available space + d, err := zfs.DatasetOpen("TESTPOOL/DATASET1") + if err != nil { + panic(err.Error()) + return + } + defer d.Close() + var p zfs.Property + if p, err = d.GetProperty(zfs.ZFSPropAvailable); err != nil { + panic(err.Error()) + return + } + println(d.PropertyToName(zfs.ZFSPropAvailable), " = ", p.Value) +} diff --git a/zpool_test.go b/zpool_test.go index d5b1afa..3a19add 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -1,17 +1,17 @@ -package zfs +package zfs_test import ( "fmt" + "github.com/bicomsystems/go-libzfs" "io/ioutil" "os" - "strconv" "testing" ) -const ( - TST_POOL_NAME = "TESTPOOL" - TST_DATASET_PATH = "TESTPOOL/DATASET" -) +/* ------------------------------------------------------------------------- */ +// HELPERS: + +var TST_POOL_NAME = "TESTPOOL" func CreateTmpSparse(prefix string, size int64) (path string, err error) { sf, err := ioutil.TempFile("/tmp", prefix) @@ -26,9 +26,24 @@ func CreateTmpSparse(prefix string, size int64) (path string, err error) { return } -// Create 3 sparse file 5G in /tmp directory each 5G size, and use them to create mirror TESTPOOL with one spare "disk" -func TestPoolCreate(t *testing.T) { - print("TEST PoolCreate ... ") +/* ------------------------------------------------------------------------- */ +// TESTS: + +// Create 3 sparse file in /tmp directory each 5G size, and use them to create +// mirror TESTPOOL with one spare "disk" +func zpoolTestPoolCreate(t *testing.T) { + println("TEST PoolCreate ... 
") + // first check if pool with same name already exist + // we don't want conflict + for { + p, err := zfs.PoolOpen(TST_POOL_NAME) + if err != nil { + break + } + p.Close() + TST_POOL_NAME += "0" + } + var s1path, s2path, s3path string var err error if s1path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { @@ -50,27 +65,27 @@ func TestPoolCreate(t *testing.T) { } disks := [2]string{s1path, s2path} - var vdevs, mdevs, sdevs []VDevSpec + var vdevs, mdevs, sdevs []zfs.VDevSpec for _, d := range disks { mdevs = append(mdevs, - VDevSpec{Type: VDevTypeFile, Path: d}) + zfs.VDevSpec{Type: zfs.VDevTypeFile, Path: d}) } - sdevs = []VDevSpec{ - {Type: VDevTypeFile, Path: s3path}} - vdevs = []VDevSpec{ - VDevSpec{Type: VDevTypeMirror, Devices: mdevs}, - VDevSpec{Type: VDevTypeSpare, Devices: sdevs}, + sdevs = []zfs.VDevSpec{ + {Type: zfs.VDevTypeFile, Path: s3path}} + vdevs = []zfs.VDevSpec{ + zfs.VDevSpec{Type: zfs.VDevTypeMirror, Devices: mdevs}, + zfs.VDevSpec{Type: zfs.VDevTypeSpare, Devices: sdevs}, } - props := make(map[PoolProp]string) - fsprops := make(map[ZFSProp]string) + props := make(map[zfs.PoolProp]string) + fsprops := make(map[zfs.ZFSProp]string) features := make(map[string]string) - fsprops[ZFSPropMountpoint] = "none" + fsprops[zfs.ZFSPropMountpoint] = "none" features["async_destroy"] = "enabled" features["empty_bpobj"] = "enabled" features["lz4_compress"] = "enabled" - pool, err := PoolCreate(TST_POOL_NAME, vdevs, features, props, fsprops) + pool, err := zfs.PoolCreate(TST_POOL_NAME, vdevs, features, props, fsprops) if err != nil { t.Error(err) // try cleanup @@ -84,15 +99,15 @@ func TestPoolCreate(t *testing.T) { os.Remove(s1path) os.Remove(s2path) os.Remove(s3path) - println("PASS") + println("PASS\n") } // Open and list all pools and them state on the system // Then list properties of last pool in the list -func TestPoolOpenAll(t *testing.T) { +func zpoolTestPoolOpenAll(t *testing.T) { println("TEST PoolOpenAll() ... 
") var pname string - pools, err := PoolOpenAll() + pools, err := zfs.PoolOpenAll() if err != nil { t.Error(err) return @@ -114,123 +129,12 @@ func TestPoolOpenAll(t *testing.T) { println("\tPool: ", pname, " state: ", pstate) p.Close() } - if len(pname) > 0 { - // test open on last pool - println("\tTry to open pool ", pname) - p, err := PoolOpen(pname) - if err != nil { - t.Error(err) - return - } - println("\tOpen pool: ", pname, " success") - println("\t", pname, " PROPERTIES:") - - pc, _ := strconv.Atoi(p.Properties[PoolNumProps].Value) - if len(p.Properties) != (pc + 1) { - p.Close() - t.Error(fmt.Sprint("Number of zpool properties does not match ", - len(p.Properties), " != ", pc+1)) - return - } - for key, value := range p.Properties { - pkey := PoolProp(key) - println("\t\t", p.PropertyToName(pkey), " = ", value.Value, " <- ", value.Source) - } - for key, value := range p.Features { - fmt.Printf("\t feature@%s = %s <- local\n", key, value) - } - if p.Properties[PoolPropListsnaps].Value == "off" { - println("\tlistsnapshots to on") - if err = p.SetProperty(PoolPropListsnaps, "on"); err != nil { - t.Error(err) - } - } else { - println("\tlistsnapshots to off") - if err = p.SetProperty(PoolPropListsnaps, "off"); err != nil { - t.Error(err) - } - } - if err == nil { - println("\tlistsnapshots", "is changed to ", - p.Properties[PoolPropListsnaps].Value, " <- ", - p.Properties[PoolPropListsnaps].Source) - } - p.Close() - } - println("PASS") + println("PASS\n") } -func TestDatasetCreate(t *testing.T) { - print("TEST DatasetCreate(", TST_DATASET_PATH, ") ... ") - props := make(map[ZFSProp]Property) - d, err := DatasetCreate(TST_DATASET_PATH, DatasetTypeFilesystem, props) - if err != nil { - t.Error(err) - return - } - d.Close() - println("PASS") -} - -func TestDatasetOpen(t *testing.T) { - print("TEST DatasetOpen(", TST_DATASET_PATH, ") ... 
") - d, err := DatasetOpen(TST_DATASET_PATH) - if err != nil { - t.Error(err) - return - } - d.Close() - println("PASS") -} - -func printDatasets(ds []Dataset) error { - for _, d := range ds { - path, err := d.Path() - if err != nil { - return err - } - println("\t", path) - if len(d.Children) > 0 { - printDatasets(d.Children) - } - } - return nil -} - -func TestDatasetOpenAll(t *testing.T) { - println("TEST DatasetOpenAll()/DatasetCloseAll() ... ") - ds, err := DatasetOpenAll() - if err != nil { - t.Error(err) - return - } - if err = printDatasets(ds); err != nil { - DatasetCloseAll(ds) - t.Error(err) - return - } - DatasetCloseAll(ds) - println("PASS") -} - -func TestDatasetDestroy(t *testing.T) { - print("TEST DATASET Destroy()", TST_DATASET_PATH, " ... ") - d, err := DatasetOpen(TST_DATASET_PATH) - if err != nil { - t.Error(err) - return - } - defer d.Close() - if err = d.Destroy(false); err != nil { - t.Error(err) - return - } - println("PASS") -} - -func TestPoolDestroy(t *testing.T) { - print("TEST POOL Destroy()", TST_POOL_NAME, " ... ") - p, err := PoolOpen(TST_POOL_NAME) +func zpoolTestPoolDestroy(t *testing.T) { + println("TEST POOL Destroy( ", TST_POOL_NAME, " ) ... ") + p, err := zfs.PoolOpen(TST_POOL_NAME) if err != nil { t.Error(err) return @@ -240,15 +144,15 @@ func TestPoolDestroy(t *testing.T) { t.Error(err.Error()) return } - println("PASS") + println("PASS\n") } -func TestFailPoolOpen(t *testing.T) { - print("TEST failing to open pool ... ") +func zpoolTestFailPoolOpen(t *testing.T) { + println("TEST open of non existing pool ... 
") pname := "fail to open this pool" - p, err := PoolOpen(pname) + p, err := zfs.PoolOpen(pname) if err != nil { - println("PASS") + println("PASS\n") return } t.Error("PoolOpen pass when it should fail") @@ -256,19 +160,22 @@ func TestFailPoolOpen(t *testing.T) { } func ExamplePoolProp() { - if pool, err := PoolOpen("SSD"); err == nil { - print("Pool size is: ", pool.Properties[PoolPropSize].Value) + if pool, err := zfs.PoolOpen("SSD"); err == nil { + print("Pool size is: ", pool.Properties[zfs.PoolPropSize].Value) // Turn on snapshot listing for pool - pool.SetProperty(PoolPropListsnaps, "on") + pool.SetProperty(zfs.PoolPropListsnaps, "on") } else { print("Error: ", err) } } +/* ------------------------------------------------------------------------- */ +// EXAMPLES: + // Open and list all pools on system with them properties func ExamplePoolOpenAll() { // Lets open handles to all active pools on system - pools, err := PoolOpenAll() + pools, err := zfs.PoolOpenAll() if err != nil { println(err) } @@ -277,15 +184,15 @@ func ExamplePoolOpenAll() { for _, p := range pools { // Print fancy header fmt.Printf("\n -----------------------------------------------------------\n") - fmt.Printf(" POOL: %49s \n", p.Properties[PoolPropName].Value) + fmt.Printf(" POOL: %49s \n", p.Properties[zfs.PoolPropName].Value) fmt.Printf("|-----------------------------------------------------------|\n") fmt.Printf("| PROPERTY | VALUE | SOURCE |\n") fmt.Printf("|-----------------------------------------------------------|\n") // Iterate pool properties and print name, value and source for key, prop := range p.Properties { - pkey := PoolProp(key) - if pkey == PoolPropName { + pkey := zfs.PoolProp(key) + if pkey == zfs.PoolPropName { continue // Skip name its already printed above } fmt.Printf("|%14s | %20s | %15s |\n", p.PropertyToName(pkey), @@ -302,33 +209,33 @@ func ExamplePoolOpenAll() { func ExamplePoolCreate() { disks := [2]string{"/dev/disk/by-id/ATA-123", "/dev/disk/by-id/ATA-456"} 
- var vdevs, mdevs, sdevs []VDevSpec + var vdevs, mdevs, sdevs []zfs.VDevSpec // build mirror devices specs for _, d := range disks { mdevs = append(mdevs, - VDevSpec{Type: VDevTypeDisk, Path: d}) + zfs.VDevSpec{Type: zfs.VDevTypeDisk, Path: d}) } // spare device specs - sdevs = []VDevSpec{ - {Type: VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}} + sdevs = []zfs.VDevSpec{ + {Type: zfs.VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}} // pool specs - vdevs = []VDevSpec{ - VDevSpec{Type: VDevTypeMirror, Devices: mdevs}, - VDevSpec{Type: VDevTypeSpare, Devices: sdevs}, + vdevs = []zfs.VDevSpec{ + zfs.VDevSpec{Type: zfs.VDevTypeMirror, Devices: mdevs}, + zfs.VDevSpec{Type: zfs.VDevTypeSpare, Devices: sdevs}, } // pool properties - props := make(map[PoolProp]string) + props := make(map[zfs.PoolProp]string) // root dataset filesystem properties - fsprops := make(map[ZFSProp]string) + fsprops := make(map[zfs.ZFSProp]string) // pool features features := make(map[string]string) // Turn off auto mounting by ZFS - fsprops[ZFSPropMountpoint] = "none" + fsprops[zfs.ZFSPropMountpoint] = "none" // Enable some features features["async_destroy"] = "enabled" @@ -337,7 +244,7 @@ func ExamplePoolCreate() { // Based on specs formed above create test pool as 2 disk mirror and // one spare disk - pool, err := PoolCreate("TESTPOOL", vdevs, features, props, fsprops) + pool, err := zfs.PoolCreate("TESTPOOL", vdevs, features, props, fsprops) if err != nil { println("Error: ", err.Error()) return @@ -349,7 +256,7 @@ func ExamplePool_Destroy() { pname := "TESTPOOL" // Need handle to pool at first place - p, err := PoolOpen(pname) + p, err := zfs.PoolOpen(pname) if err != nil { println("Error: ", err.Error()) return @@ -363,33 +270,3 @@ func ExamplePool_Destroy() { return } } - -// Example of creating ZFS volume -func ExampleDatasetCreate() { - // Create map to represent ZFS dataset properties. 
This is equivalent to - // list of properties you can get from ZFS CLI tool, and some more - // internally used by libzfs. - props := make(map[ZFSProp]Property) - - // I choose to create (block) volume 1GiB in size. Size is just ZFS dataset - // property and this is done as map of strings. So, You have to either - // specify size as base 10 number in string, or use strconv package or - // similar to convert in to string (base 10) from numeric type. - strSize := "1073741824" - - props[ZFSPropVolsize] = Property{Value: strSize} - // In addition I explicitly choose some more properties to be set. - props[ZFSPropVolblocksize] = Property{Value: "4096"} - props[ZFSPropReservation] = Property{Value: strSize} - - // Lets create desired volume - d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props) - if err != nil { - println(err.Error()) - return - } - // Dataset have to be closed for memory cleanup - defer d.Close() - - println("Created zfs volume TESTPOOL/VOLUME1") -} From 49330dac1e5e0c56d466a87ce22ba0078d2023ac Mon Sep 17 00:00:00 2001 From: fkasumovic Date: Thu, 4 Jun 2015 00:04:06 +0200 Subject: [PATCH 06/36] Create LICENSE.md --- LICENSE.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 LICENSE.md diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..1aaa963 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,27 @@ +Copyright (c) 2015, Faruk Kasumovic +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of go-libzfs nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. From 44640772b1bfd400fda1adb1566b1068154060f7 Mon Sep 17 00:00:00 2001 From: fkasumovic Date: Thu, 4 Jun 2015 00:05:13 +0200 Subject: [PATCH 07/36] Update LICENSE.md --- LICENSE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE.md b/LICENSE.md index 1aaa963..c1aaecb 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -Copyright (c) 2015, Faruk Kasumovic +Copyright (c) 2015, Faruk Kasumovic All rights reserved. Redistribution and use in source and binary forms, with or without From 39d6835ce36ee6f39475028bb5972fea2a8e1bba Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 4 Jun 2015 00:08:20 +0200 Subject: [PATCH 08/36] - LICENSE file is now replaced with LICENSE.md So it fit GitHub established practice --- LICENSE | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 LICENSE diff --git a/LICENSE b/LICENSE deleted file mode 100644 index dfc205d..0000000 --- a/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Faruk Kasumovic -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors -may be used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- From 507abac6835d8533ff2bf2e1a545b765fc4383a2 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 8 Jun 2015 23:41:22 +0200 Subject: [PATCH 09/36] - Add argument to pool Export for history log string, hard force (ExportForce), export and import tests, and overall tests fixes and improvements --- a_test.go | 6 ++++ zfs_test.go | 27 ++++++++++++--- zpool.go | 13 +++++-- zpool_test.go | 95 ++++++++++++++++++++++++++++++++++++++------------- 4 files changed, 111 insertions(+), 30 deletions(-) diff --git a/a_test.go b/a_test.go index fae4a3c..7665100 100644 --- a/a_test.go +++ b/a_test.go @@ -9,6 +9,10 @@ import ( func Test(t *testing.T) { zpoolTestPoolCreate(t) + zpoolTestExport(t) + zpoolTestImport(t) + zpoolTestExportForce(t) + zpoolTestImport(t) zpoolTestPoolOpenAll(t) zpoolTestFailPoolOpen(t) @@ -20,4 +24,6 @@ func Test(t *testing.T) { zfsTestDatasetDestroy(t) zpoolTestPoolDestroy(t) + + cleanupVDisks() } diff --git a/zfs_test.go b/zfs_test.go index 5f82b3a..ae7858b 100644 --- a/zfs_test.go +++ b/zfs_test.go @@ -23,8 +23,7 @@ func printDatasets(ds []zfs.Dataset) error { if err != nil { return err } - fmt.Printf(" %30s | %10s\n", path, - p.Value) + fmt.Printf(" %30s | %10s\n", path, p.Value) if len(d.Children) > 0 { printDatasets(d.Children) } @@ -159,13 +158,33 @@ func ExampleDatasetOpen() { d, err := zfs.DatasetOpen("TESTPOOL/DATASET1") if err != nil { panic(err.Error()) - return } defer d.Close() var p zfs.Property if p, err = d.GetProperty(zfs.ZFSPropAvailable); err != nil { panic(err.Error()) - return } println(d.PropertyToName(zfs.ZFSPropAvailable), " = ", p.Value) } + +func ExampleDatasetOpenAll() { + datasets, err := zfs.DatasetOpenAll() + if err != nil { + panic(err.Error()) + } + defer zfs.DatasetCloseAll(datasets) + + // Print out path and type of root datasets + for _, d := range datasets { + path, err := d.Path() + if err != nil { + panic(err.Error()) + } + p, err := d.GetProperty(zfs.ZFSPropType) + if err != nil { + panic(err.Error()) + } + 
fmt.Printf("%30s | %10s\n", path, p.Value) + } + +} diff --git a/zpool.go b/zpool.go index cc1b392..efeb768 100644 --- a/zpool.go +++ b/zpool.go @@ -549,13 +549,20 @@ func (pool *Pool) Destroy(logStr string) (err error) { // Before exporting the pool, all datasets within the pool are unmounted. // A pool can not be exported if it has a shared spare that is currently // being used. -func (pool *Pool) Export(force bool) (err error) { +func (pool *Pool) Export(force bool, log string) (err error) { var force_t C.boolean_t = 0 if force { force_t = 1 } - retcode := C.zpool_export(pool.list.zph, force_t, nil) - if retcode != 0 { + if rc := C.zpool_export(pool.list.zph, force_t, C.CString(log)); rc != 0 { + err = LastError() + } + return +} + +// Hard force +func (pool *Pool) ExportForce(log string) (err error) { + if rc := C.zpool_export_force(pool.list.zph, C.CString(log)); rc != 0 { err = LastError() } return diff --git a/zpool_test.go b/zpool_test.go index 3a19add..2f2e5e0 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -26,6 +26,36 @@ func CreateTmpSparse(prefix string, size int64) (path string, err error) { return } +var s1path, s2path, s3path string + +// This will create sparse files in tmp directory, +// for purpose of creating test pool. 
+func createTestpoolVdisks() (err error) { + if s1path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { + return + } + if s2path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { + // try cleanup + os.Remove(s1path) + return + } + if s3path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { + // try cleanup + os.Remove(s1path) + os.Remove(s2path) + return + } + return +} + +// Cleanup sparse files used for tests +func cleanupVDisks() { + // try cleanup + os.Remove(s1path) + os.Remove(s2path) + os.Remove(s3path) +} + /* ------------------------------------------------------------------------- */ // TESTS: @@ -43,26 +73,13 @@ func zpoolTestPoolCreate(t *testing.T) { p.Close() TST_POOL_NAME += "0" } - - var s1path, s2path, s3path string var err error - if s1path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { - t.Error(err) - return - } - if s2path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { - // try cleanup - os.Remove(s1path) - t.Error(err) - return - } - if s3path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { - // try cleanup - os.Remove(s1path) - os.Remove(s2path) + + if err = createTestpoolVdisks(); err != nil { t.Error(err) return } + disks := [2]string{s1path, s2path} var vdevs, mdevs, sdevs []zfs.VDevSpec @@ -95,10 +112,7 @@ func zpoolTestPoolCreate(t *testing.T) { return } defer pool.Close() - // try cleanup - os.Remove(s1path) - os.Remove(s2path) - os.Remove(s3path) + println("PASS\n") } @@ -159,6 +173,44 @@ func zpoolTestFailPoolOpen(t *testing.T) { p.Close() } +func zpoolTestExport(t *testing.T) { + println("TEST POOL Export( ", TST_POOL_NAME, " ) ... ") + p, err := zfs.PoolOpen(TST_POOL_NAME) + if err != nil { + t.Error(err) + return + } + p.Export(false, "Test exporting pool") + defer p.Close() + println("PASS\n") +} + +func zpoolTestExportForce(t *testing.T) { + println("TEST POOL ExportForce( ", TST_POOL_NAME, " ) ... 
") + p, err := zfs.PoolOpen(TST_POOL_NAME) + if err != nil { + t.Error(err) + return + } + p.ExportForce("Test force exporting pool") + defer p.Close() + println("PASS\n") +} + +func zpoolTestImport(t *testing.T) { + println("TEST POOL Import( ", TST_POOL_NAME, " ) ... ") + p, err := zfs.PoolImport(TST_POOL_NAME, []string{"/tmp"}) + if err != nil { + t.Error(err) + return + } + defer p.Close() + println("PASS\n") +} + +/* ------------------------------------------------------------------------- */ +// EXAMPLES: + func ExamplePoolProp() { if pool, err := zfs.PoolOpen("SSD"); err == nil { print("Pool size is: ", pool.Properties[zfs.PoolPropSize].Value) @@ -169,9 +221,6 @@ func ExamplePoolProp() { } } -/* ------------------------------------------------------------------------- */ -// EXAMPLES: - // Open and list all pools on system with them properties func ExamplePoolOpenAll() { // Lets open handles to all active pools on system From 1d6e6a86cba23c8932d1a8d4a006d980471ef1db Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 8 Jun 2015 23:53:06 +0200 Subject: [PATCH 10/36] - Export and import pool examples --- zpool_test.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/zpool_test.go b/zpool_test.go index 2f2e5e0..d08678b 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -319,3 +319,33 @@ func ExamplePool_Destroy() { return } } + +func ExamplePoolImport() { + p, err := zfs.PoolImport("TESTPOOL", []string{"/dev/disk/by-id"}) + if err != nil { + panic(err) + } + p.Close() +} + +func ExamplePool_Export() { + p, err := zfs.PoolOpen("TESTPOOL") + if err != nil { + panic(err) + } + defer p.Close() + if err = p.Export(false, "Example exporting pool"); err != nil { + panic(err) + } +} + +func ExamplePool_ExportForce() { + p, err := zfs.PoolOpen("TESTPOOL") + if err != nil { + panic(err) + } + defer p.Close() + if err = p.ExportForce("Example exporting pool"); err != nil { + panic(err) + } +} From 
c1288a9a2ebb6c385730f99a664f6d30d5255db0 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Tue, 9 Jun 2015 14:26:35 +0200 Subject: [PATCH 11/36] - Improvements on properties, state and status handling of pool and dataset --- a_test.go | 2 ++ zfs.go | 6 ++++- zfs_test.go | 2 +- zpool.go | 12 ++++++---- zpool_test.go | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++- 5 files changed, 79 insertions(+), 8 deletions(-) diff --git a/a_test.go b/a_test.go index 7665100..d5b8204 100644 --- a/a_test.go +++ b/a_test.go @@ -13,6 +13,8 @@ func Test(t *testing.T) { zpoolTestImport(t) zpoolTestExportForce(t) zpoolTestImport(t) + zpoolTestPoolProp(t) + zpoolTestPoolStatusAndState(t) zpoolTestPoolOpenAll(t) zpoolTestFailPoolOpen(t) diff --git a/zfs.go b/zfs.go index 6b07fba..c4a1043 100644 --- a/zfs.go +++ b/zfs.go @@ -266,6 +266,10 @@ func (d *Dataset) SetProperty(p ZFSProp, value string) (err error) { if errcode != 0 { err = LastError() } + // Update Properties member with change made + if _, err = d.GetProperty(p); err != nil { + return + } return } @@ -398,7 +402,7 @@ func (d *Dataset) UnmountAll(flags int) (err error) { // ( returns built in string representation of property name). // This is optional, you can represent each property with string // name of choice. 
-func (d *Dataset) PropertyToName(p ZFSProp) (name string) { +func DatasetPropertyToName(p ZFSProp) (name string) { if p == ZFSNumProps { return "numofprops" } diff --git a/zfs_test.go b/zfs_test.go index ae7858b..2e4d833 100644 --- a/zfs_test.go +++ b/zfs_test.go @@ -164,7 +164,7 @@ func ExampleDatasetOpen() { if p, err = d.GetProperty(zfs.ZFSPropAvailable); err != nil { panic(err.Error()) } - println(d.PropertyToName(zfs.ZFSPropAvailable), " = ", p.Value) + println(zfs.DatasetPropertyToName(zfs.ZFSPropAvailable), " = ", p.Value) } func ExampleDatasetOpenAll() { diff --git a/zpool.go b/zpool.go index efeb768..0ef1c33 100644 --- a/zpool.go +++ b/zpool.go @@ -127,7 +127,7 @@ func PoolCloseAll(pools []Pool) { // ( returns built in string representation of property name). // This is optional, you can represent each property with string // name of choice. -func (pool *Pool) PropertyToName(p PoolProp) (name string) { +func PoolPropertyToName(p PoolProp) (name string) { if p == PoolNumProps { return "numofprops" } @@ -171,7 +171,7 @@ func (pool *Pool) GetProperty(p PoolProp) (prop Property, err error) { // First check if property exist at all if p < PoolPropName || p > PoolNumProps { err = errors.New(fmt.Sprint("Unknown zpool property: ", - pool.PropertyToName(p))) + PoolPropertyToName(p))) return } var list C.property_list_t @@ -210,15 +210,17 @@ func (pool *Pool) SetProperty(p PoolProp, value string) (err error) { // First check if property exist at all if p < PoolPropName || p > PoolNumProps { err = errors.New(fmt.Sprint("Unknown zpool property: ", - pool.PropertyToName(p))) + PoolPropertyToName(p))) return } - r := C.zpool_set_prop(pool.list.zph, C.CString(pool.PropertyToName(p)), C.CString(value)) + r := C.zpool_set_prop(pool.list.zph, C.CString(PoolPropertyToName(p)), C.CString(value)) if r != 0 { err = LastError() } else { // Update Properties member with change made - _, err = pool.GetProperty(p) + if _, err = pool.GetProperty(p); err != nil { + return + } } 
return } diff --git a/zpool_test.go b/zpool_test.go index d08678b..43f6de5 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -208,6 +208,59 @@ func zpoolTestImport(t *testing.T) { println("PASS\n") } +func zpoolTestPoolProp(t *testing.T) { + println("TEST PoolProp on ", TST_POOL_NAME, " ... ") + if pool, err := zfs.PoolOpen(TST_POOL_NAME); err == nil { + defer pool.Close() + // Turn on snapshot listing for pool + pool.SetProperty(zfs.PoolPropListsnaps, "on") + // Verify change is succesfull + if pool.Properties[zfs.PoolPropListsnaps].Value != "on" { + t.Error(fmt.Errorf("Update of pool property failed")) + return + } + + // Test fetching property + _, err := pool.GetProperty(zfs.PoolPropHealth) + if err != nil { + t.Error(err) + return + } + + // fetch all properties + if err = pool.ReloadProperties(); err != nil { + t.Error(err) + return + } + } else { + t.Error(err) + return + } + println("PASS\n") +} + +func zpoolTestPoolStatusAndState(t *testing.T) { + println("TEST pool Status/State ( ", TST_POOL_NAME, " ) ... 
") + pool, err := zfs.PoolOpen(TST_POOL_NAME) + if err != nil { + t.Error(err.Error()) + return + } + defer pool.Close() + + if _, err = pool.Status(); err != nil { + t.Error(err.Error()) + return + } + + if _, err = pool.State(); err != nil { + t.Error(err.Error()) + return + } + + println("PASS\n") +} + /* ------------------------------------------------------------------------- */ // EXAMPLES: @@ -216,6 +269,15 @@ func ExamplePoolProp() { print("Pool size is: ", pool.Properties[zfs.PoolPropSize].Value) // Turn on snapshot listing for pool pool.SetProperty(zfs.PoolPropListsnaps, "on") + println("Changed property", + zfs.PoolPropertyToName(zfs.PoolPropListsnaps), "to value:", + pool.Properties[zfs.PoolPropListsnaps].Value) + + prop, err := pool.GetProperty(zfs.PoolPropHealth) + if err != nil { + panic(err) + } + println("Update and print out pool health:", prop.Value) } else { print("Error: ", err) } @@ -244,7 +306,8 @@ func ExamplePoolOpenAll() { if pkey == zfs.PoolPropName { continue // Skip name its already printed above } - fmt.Printf("|%14s | %20s | %15s |\n", p.PropertyToName(pkey), + fmt.Printf("|%14s | %20s | %15s |\n", + zfs.PoolPropertyToName(pkey), prop.Value, prop.Source) println("") } From f7f90fe57fbf85111224c1468f8a12cc0fd193bf Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 11 Jun 2015 00:51:46 +0200 Subject: [PATCH 12/36] - Pretty much completed set of examples and tests on 'pool' APIs implemented so far --- zpool.go | 7 +++++++ zpool_test.go | 28 +++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/zpool.go b/zpool.go index 0ef1c33..0fb98ba 100644 --- a/zpool.go +++ b/zpool.go @@ -136,6 +136,13 @@ func PoolPropertyToName(p PoolProp) (name string) { return } +// Map POOL STATE to string. 
+func PoolStateToName(state PoolState) (name string) { + ps := C.pool_state_t(state) + name = C.GoString(C.zpool_pool_state_to_name(ps)) + return +} + // Re-read ZFS pool properties and features, refresh Pool.Properties and // Pool.Features map func (pool *Pool) ReloadProperties() (err error) { diff --git a/zpool_test.go b/zpool_test.go index 43f6de5..5cc3079 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -227,6 +227,17 @@ func zpoolTestPoolProp(t *testing.T) { return } + // this test pool should not be bootable + prop, err := pool.GetProperty(zfs.PoolPropBootfs) + if err != nil { + t.Error(err) + return + } + if prop.Value != "-" { + t.Errorf("Failed at bootable fs property evaluation") + return + } + // fetch all properties if err = pool.ReloadProperties(); err != nil { t.Error(err) @@ -253,10 +264,12 @@ func zpoolTestPoolStatusAndState(t *testing.T) { return } - if _, err = pool.State(); err != nil { + var pstate zfs.PoolState + if pstate, err = pool.State(); err != nil { t.Error(err.Error()) return } + println("POOL", TST_POOL_NAME, "state:", zfs.PoolStateToName(pstate)) println("PASS\n") } @@ -412,3 +425,16 @@ func ExamplePool_ExportForce() { panic(err) } } + +func ExamplePool_State() { + p, err := zfs.PoolOpen("TESTPOOL") + if err != nil { + panic(err) + } + defer p.Close() + pstate, err := p.State() + if err != nil { + panic(err) + } + println("POOL TESTPOOL state:", zfs.PoolStateToName(pstate)) +} From db4703b70824f90c8e0d3f69df0dff7f1806352f Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Wed, 28 Oct 2015 11:00:08 +0100 Subject: [PATCH 13/36] - Add properties from libzfs in 0.6.5 version --- common.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/common.go b/common.go index f7e6c55..6089530 100644 --- a/common.go +++ b/common.go @@ -152,6 +152,10 @@ const ( PoolPropComment PoolPropExpandsz PoolPropFreeing + PoolPropFragmentaion + PoolPropLeaked + PoolPropMaxBlockSize + PoolPropTName PoolNumProps ) @@ -235,6 +239,7 @@ const ( 
ZFSPropSelinux_rootcontext ZFSPropRelatime ZFSPropRedundant_metadata + ZFSPropOverlay ZFSNumProps ) From bc197372226a56021e0c6c57f673051cb0002167 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Fri, 4 Dec 2015 23:05:19 +0100 Subject: [PATCH 14/36] - Changes to make library interface more clear and to better suit go package standards --- README.md | 6 +- common.go | 329 +++++++++++++++++++++++++++++++------------------- zfs.c | 4 +- zfs.go | 112 +++++++++-------- zfs_test.go | 74 ++++++------ zpool.go | 123 +++++++++++-------- zpool_test.go | 71 +++++------ 7 files changed, 420 insertions(+), 299 deletions(-) diff --git a/README.md b/README.md index adea694..cdd0dcf 100644 --- a/README.md +++ b/README.md @@ -46,10 +46,10 @@ props := make(map[ZFSProp]Property) // similar to convert in to string (base 10) from numeric type. strSize := "1073741824" -props[ZFSPropVolsize] = Property{Value: strSize} +props[DatasetPropVolsize] = Property{Value: strSize} // In addition I explicitly choose some more properties to be set. -props[ZFSPropVolblocksize] = Property{Value: "4096"} -props[ZFSPropReservation] = Property{Value: strSize} +props[DatasetPropVolblocksize] = Property{Value: "4096"} +props[DatasetPropReservation] = Property{Value: strSize} // Lets create desired volume d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props) diff --git a/common.go b/common.go index 6089530..d89a3e5 100644 --- a/common.go +++ b/common.go @@ -1,4 +1,4 @@ -// Implements basic manipulation of ZFS pools and data sets. +// Package zfs implements basic manipulation of ZFS pools and data sets. // Use libzfs C library instead CLI zfs tools, with goal // to let using and manipulating OpenZFS form with in go project. 
// @@ -23,36 +23,41 @@ import ( "errors" ) +// VDevType type of device in the pool type VDevType string -var libzfs_handle *C.struct_libzfs_handle +var libzfsHandle *C.struct_libzfs_handle func init() { - libzfs_handle = C.libzfs_init() + libzfsHandle = C.libzfs_init() return } // Types of Virtual Devices const ( - VDevTypeRoot VDevType = "root" - VDevTypeMirror = "mirror" - VDevTypeReplacing = "replacing" - VDevTypeRaidz = "raidz" - VDevTypeDisk = "disk" - VDevTypeFile = "file" - VDevTypeMissing = "missing" - VDevTypeHole = "hole" - VDevTypeSpare = "spare" - VDevTypeLog = "log" - VDevTypeL2cache = "l2cache" + VDevTypeRoot VDevType = "root" // VDevTypeRoot root device in ZFS pool + VDevTypeMirror = "mirror" // VDevTypeMirror mirror device in ZFS pool + VDevTypeReplacing = "replacing" // VDevTypeReplacing replacing + VDevTypeRaidz = "raidz" // VDevTypeRaidz RAIDZ device + VDevTypeDisk = "disk" // VDevTypeDisk device is disk + VDevTypeFile = "file" // VDevTypeFile device is file + VDevTypeMissing = "missing" // VDevTypeMissing missing device + VDevTypeHole = "hole" // VDevTypeHole hole + VDevTypeSpare = "spare" // VDevTypeSpare spare device + VDevTypeLog = "log" // VDevTypeLog ZIL device + VDevTypeL2cache = "l2cache" // VDevTypeL2cache cache device (disk) ) -type PoolProp int -type ZFSProp int +// Prop type to enumerate all different properties suppoerted by ZFS +type Prop int + +// PoolStatus type representing status of the pool type PoolStatus int + +// PoolState type representing pool state type PoolState uint64 -// Zfs pool or dataset property +// Property ZFS pool or dataset property value type Property struct { Value string Source string @@ -64,21 +69,21 @@ const ( * The following correspond to faults as defined in the (fault.fs.zfs.*) * event namespace. Each is associated with a corresponding message ID. 
*/ - PoolStatusCorrupt_cache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */ - PoolStatusMissing_dev_r /* missing device with replicas */ - PoolStatusMissing_dev_nr /* missing device with no replicas */ - PoolStatusCorrupt_label_r /* bad device label with replicas */ - PoolStatusCorrupt_label_nr /* bad device label with no replicas */ - PoolStatusBad_guid_sum /* sum of device guids didn't match */ - PoolStatusCorrupt_pool /* pool metadata is corrupted */ - PoolStatusCorrupt_data /* data errors in user (meta)data */ - PoolStatusFailing_dev /* device experiencing errors */ - PoolStatusVersion_newer /* newer on-disk version */ - PoolStatusHostid_mismatch /* last accessed by another system */ - PoolStatusIo_failure_wait /* failed I/O, failmode 'wait' */ - PoolStatusIo_failure_continue /* failed I/O, failmode 'continue' */ - PoolStatusBad_log /* cannot read log chain(s) */ - PoolStatusErrata /* informational errata available */ + PoolStatusCorruptCache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */ + PoolStatusMissingDevR /* missing device with replicas */ + PoolStatusMissingDevNr /* missing device with no replicas */ + PoolStatusCorruptLabelR /* bad device label with replicas */ + PoolStatusCorruptLabelNr /* bad device label with no replicas */ + PoolStatusBadGUIDSum /* sum of device guids didn't match */ + PoolStatusCorruptPool /* pool metadata is corrupted */ + PoolStatusCorruptData /* data errors in user (meta)data */ + PoolStatusFailingDev /* device experiencing errors */ + PoolStatusVersionNewer /* newer on-disk version */ + PoolStatusHostidMismatch /* last accessed by another system */ + PoolStatusIoFailureWait /* failed I/O, failmode 'wait' */ + PoolStatusIoFailureContinue /* failed I/O, failmode 'continue' */ + PoolStatusBadLog /* cannot read log chain(s) */ + PoolStatusErrata /* informational errata available */ /* * If the pool has unsupported features but can still be opened in @@ -86,27 +91,27 @@ const ( * pool has unsupported features but 
cannot be opened at all, its * status is ZPOOL_STATUS_UNSUP_FEAT_READ. */ - PoolStatusUnsup_feat_read /* unsupported features for read */ - PoolStatusUnsup_feat_write /* unsupported features for write */ + PoolStatusUnsupFeatRead /* unsupported features for read */ + PoolStatusUnsupFeatWrite /* unsupported features for write */ /* * These faults have no corresponding message ID. At the time we are * checking the status, the original reason for the FMA fault (I/O or * checksum errors) has been lost. */ - PoolStatusFaulted_dev_r /* faulted device with replicas */ - PoolStatusFaulted_dev_nr /* faulted device with no replicas */ + PoolStatusFaultedDevR /* faulted device with replicas */ + PoolStatusFaultedDevNr /* faulted device with no replicas */ /* * The following are not faults per se, but still an error possibly * requiring administrative attention. There is no corresponding * message ID. */ - PoolStatusVersion_older /* older legacy on-disk version */ - PoolStatusFeat_disabled /* supported features are disabled */ - PoolStatusResilvering /* device being resilvered */ - PoolStatusOffline_dev /* device online */ - PoolStatusRemoved_dev /* removed device */ + PoolStatusVersionOlder /* older legacy on-disk version */ + PoolStatusFeatDisabled /* supported features are disabled */ + PoolStatusResilvering /* device being resilvered */ + PoolStatusOfflineDev /* device online */ + PoolStatusRemovedDev /* removed device */ /* * Finally, the following indicates a healthy pool. @@ -129,12 +134,12 @@ const ( // Pool properties. Enumerates available ZFS pool properties. Use it to access // pool properties either to read or set soecific property. const ( - PoolPropName PoolProp = iota + PoolPropName Prop = iota PoolPropSize PoolPropCapacity PoolPropAltroot PoolPropHealth - PoolPropGuid + PoolPropGUID PoolPropVersion PoolPropBootfs PoolPropDelegation @@ -166,102 +171,178 @@ const ( * the property table in module/zcommon/zfs_prop.c. 
*/ const ( - ZFSPropType ZFSProp = iota - ZFSPropCreation - ZFSPropUsed - ZFSPropAvailable - ZFSPropReferenced - ZFSPropCompressratio - ZFSPropMounted - ZFSPropOrigin - ZFSPropQuota - ZFSPropReservation - ZFSPropVolsize - ZFSPropVolblocksize - ZFSPropRecordsize - ZFSPropMountpoint - ZFSPropSharenfs - ZFSPropChecksum - ZFSPropCompression - ZFSPropAtime - ZFSPropDevices - ZFSPropExec - ZFSPropSetuid - ZFSPropReadonly - ZFSPropZoned - ZFSPropSnapdir - ZFSPropPrivate /* not exposed to user, temporary */ - ZFSPropAclinherit - ZFSPropCreatetxg /* not exposed to the user */ - ZFSPropName /* not exposed to the user */ - ZFSPropCanmount - ZFSPropIscsioptions /* not exposed to the user */ - ZFSPropXattr - ZFSPropNumclones /* not exposed to the user */ - ZFSPropCopies - ZFSPropVersion - ZFSPropUtf8only - ZFSPropNormalize - ZFSPropCase - ZFSPropVscan - ZFSPropNbmand - ZFSPropSharesmb - ZFSPropRefquota - ZFSPropRefreservation - ZFSPropGuid - ZFSPropPrimarycache - ZFSPropSecondarycache - ZFSPropUsedsnap - ZFSPropUsedds - ZFSPropUsedchild - ZFSPropUsedrefreserv - ZFSPropUseraccounting /* not exposed to the user */ - ZFSPropStmf_shareinfo /* not exposed to the user */ - ZFSPropDefer_destroy - ZFSPropUserrefs - ZFSPropLogbias - ZFSPropUnique /* not exposed to the user */ - ZFSPropObjsetid /* not exposed to the user */ - ZFSPropDedup - ZFSPropMlslabel - ZFSPropSync - ZFSPropRefratio - ZFSPropWritten - ZFSPropClones - ZFSPropLogicalused - ZFSPropLogicalreferenced - ZFSPropInconsistent /* not exposed to the user */ - ZFSPropSnapdev - ZFSPropAcltype - ZFSPropSelinux_context - ZFSPropSelinux_fscontext - ZFSPropSelinux_defcontext - ZFSPropSelinux_rootcontext - ZFSPropRelatime - ZFSPropRedundant_metadata - ZFSPropOverlay - ZFSNumProps + DatasetPropType Prop = iota + DatasetPropCreation + DatasetPropUsed + DatasetPropAvailable + DatasetPropReferenced + DatasetPropCompressratio + DatasetPropMounted + DatasetPropOrigin + DatasetPropQuota + DatasetPropReservation + DatasetPropVolsize + 
DatasetPropVolblocksize + DatasetPropRecordsize + DatasetPropMountpoint + DatasetPropSharenfs + DatasetPropChecksum + DatasetPropCompression + DatasetPropAtime + DatasetPropDevices + DatasetPropExec + DatasetPropSetuid + DatasetPropReadonly + DatasetPropZoned + DatasetPropSnapdir + DatasetPropPrivate /* not exposed to user, temporary */ + DatasetPropAclinherit + DatasetPropCreatetxg /* not exposed to the user */ + DatasetPropName /* not exposed to the user */ + DatasetPropCanmount + DatasetPropIscsioptions /* not exposed to the user */ + DatasetPropXattr + DatasetPropNumclones /* not exposed to the user */ + DatasetPropCopies + DatasetPropVersion + DatasetPropUtf8only + DatasetPropNormalize + DatasetPropCase + DatasetPropVscan + DatasetPropNbmand + DatasetPropSharesmb + DatasetPropRefquota + DatasetPropRefreservation + DatasetPropGUID + DatasetPropPrimarycache + DatasetPropSecondarycache + DatasetPropUsedsnap + DatasetPropUsedds + DatasetPropUsedchild + DatasetPropUsedrefreserv + DatasetPropUseraccounting /* not exposed to the user */ + DatasetPropStmfShareinfo /* not exposed to the user */ + DatasetPropDeferDestroy + DatasetPropUserrefs + DatasetPropLogbias + DatasetPropUnique /* not exposed to the user */ + DatasetPropObjsetid /* not exposed to the user */ + DatasetPropDedup + DatasetPropMlslabel + DatasetPropSync + DatasetPropRefratio + DatasetPropWritten + DatasetPropClones + DatasetPropLogicalused + DatasetPropLogicalreferenced + DatasetPropInconsistent /* not exposed to the user */ + DatasetPropSnapdev + DatasetPropAcltype + DatasetPropSelinuxContext + DatasetPropSelinuxFsContext + DatasetPropSelinuxDefContext + DatasetPropSelinuxRootContext + DatasetPropRelatime + DatasetPropRedundantMetadata + DatasetPropOverlay + DatasetNumProps ) -// Get last underlying libzfs error description if any +// LastError get last underlying libzfs error description if any func LastError() (err error) { - errno := C.libzfs_errno(libzfs_handle) + errno := 
C.libzfs_errno(libzfsHandle) if errno == 0 { return nil } - return errors.New(C.GoString(C.libzfs_error_description(libzfs_handle))) + return errors.New(C.GoString(C.libzfs_error_description(libzfsHandle))) } -// Force clear of any last error set by undeliying libzfs +// ClearLastError force clear of any last error set by undeliying libzfs func ClearLastError() (err error) { err = LastError() - C.clear_last_error(libzfs_handle) + C.clear_last_error(libzfsHandle) return } -func boolean_t(b bool) (r C.boolean_t) { +func booleanT(b bool) (r C.boolean_t) { if b { return 1 } return 0 } + +// ZFS errors +const ( + ESuccess = 0 /* no error -- success */ + ENomem = 2000 << iota /* out of memory */ + EBadprop /* invalid property value */ + EPropreadonly /* cannot set readonly property */ + EProptype /* property does not apply to dataset type */ + EPropnoninherit /* property is not inheritable */ + EPropspace /* bad quota or reservation */ + EBadtype /* dataset is not of appropriate type */ + EBusy /* pool or dataset is busy */ + EExists /* pool or dataset already exists */ + ENoent /* no such pool or dataset */ + EBadstream /* bad backup stream */ + EDsreadonly /* dataset is readonly */ + EVoltoobig /* volume is too large for 32-bit system */ + EInvalidname /* invalid dataset name */ + EBadrestore /* unable to restore to destination */ + EBadbackup /* backup failed */ + EBadtarget /* bad attach/detach/replace target */ + ENodevice /* no such device in pool */ + EBaddev /* invalid device to add */ + ENoreplicas /* no valid replicas */ + EResilvering /* currently resilvering */ + EBadversion /* unsupported version */ + EPoolunavail /* pool is currently unavailable */ + EDevoverflow /* too many devices in one vdev */ + EBadpath /* must be an absolute path */ + ECrosstarget /* rename or clone across pool or dataset */ + EZoned /* used improperly in local zone */ + EMountfailed /* failed to mount dataset */ + EUmountfailed /* failed to unmount dataset */ + EUnsharenfsfailed /* 
unshare(1M) failed */ + ESharenfsfailed /* share(1M) failed */ + EPerm /* permission denied */ + ENospc /* out of space */ + EFault /* bad address */ + EIo /* I/O error */ + EIntr /* signal received */ + EIsspare /* device is a hot spare */ + EInvalconfig /* invalid vdev configuration */ + ERecursive /* recursive dependency */ + ENohistory /* no history object */ + EPoolprops /* couldn't retrieve pool props */ + EPoolNotsup /* ops not supported for this type of pool */ + EPoolInvalarg /* invalid argument for this pool operation */ + ENametoolong /* dataset name is too long */ + EOpenfailed /* open of device failed */ + ENocap /* couldn't get capacity */ + ELabelfailed /* write of label failed */ + EBadwho /* invalid permission who */ + EBadperm /* invalid permission */ + EBadpermset /* invalid permission set name */ + ENodelegation /* delegated administration is disabled */ + EUnsharesmbfailed /* failed to unshare over smb */ + ESharesmbfailed /* failed to share over smb */ + EBadcache /* bad cache file */ + EIsl2CACHE /* device is for the level 2 ARC */ + EVdevnotsup /* unsupported vdev type */ + ENotsup /* ops not supported on this dataset */ + EActiveSpare /* pool has active shared spare devices */ + EUnplayedLogs /* log device has unplayed logs */ + EReftagRele /* snapshot release: tag not found */ + EReftagHold /* snapshot hold: tag already exists */ + ETagtoolong /* snapshot hold/rele: tag too long */ + EPipefailed /* pipe create failed */ + EThreadcreatefailed /* thread create failed */ + EPostsplitOnline /* onlining a disk after splitting it */ + EScrubbing /* currently scrubbing */ + ENoScrub /* no active scrub */ + EDiff /* general failure of zfs diff */ + EDiffdata /* bad zfs diff data */ + EPoolreadonly /* pool is in read-only mode */ + EUnknown +) diff --git a/zfs.c b/zfs.c index 395a8e4..46b7721 100644 --- a/zfs.c +++ b/zfs.c @@ -1,7 +1,7 @@ /* C wrappers around some zfs calls and C in general that should simplify * using libzfs from go language, make 
go code shorter and more readable. */ - + #include #include #include @@ -72,7 +72,7 @@ int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) { zprop_source_t source; char statbuf[INT_MAX_VALUE]; - r = zfs_prop_get(zh, prop, + r = zfs_prop_get(zh, prop, list->value, INT_MAX_VALUE, &source, statbuf, INT_MAX_VALUE, 1); if (r == 0) { // strcpy(list->name, zpool_prop_to_name(prop)); diff --git a/zfs.go b/zfs.go index c4a1043..288fab7 100644 --- a/zfs.go +++ b/zfs.go @@ -14,20 +14,30 @@ const ( msgDatasetIsNil = "Dataset handle not initialized or its closed" ) +// DatasetProperties type is map of dataset or volume properties prop -> value +type DatasetProperties map[Prop]string + +// DatasetType defines enum of dataset types type DatasetType int32 const ( + // DatasetTypeFilesystem - file system dataset DatasetTypeFilesystem DatasetType = (1 << 0) - DatasetTypeSnapshot = (1 << 1) - DatasetTypeVolume = (1 << 2) - DatasetTypePool = (1 << 3) - DatasetTypeBookmark = (1 << 4) + // DatasetTypeSnapshot - snapshot of dataset + DatasetTypeSnapshot = (1 << 1) + // DatasetTypeVolume - volume (virtual block device) dataset + DatasetTypeVolume = (1 << 2) + // DatasetTypePool - pool dataset + DatasetTypePool = (1 << 3) + // DatasetTypeBookmark - bookmark dataset + DatasetTypeBookmark = (1 << 4) ) +// Dataset - ZFS dataset object type Dataset struct { list *C.dataset_list_t Type DatasetType - Properties map[ZFSProp]Property + Properties map[Prop]Property Children []Dataset } @@ -37,7 +47,7 @@ func (d *Dataset) openChildren() (err error) { errcode := C.dataset_list_children(d.list.zh, &(dataset.list)) for dataset.list != nil { dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh)) - dataset.Properties = make(map[ZFSProp]Property) + dataset.Properties = make(map[Prop]Property) err = dataset.ReloadProperties() if err != nil { return @@ -49,7 +59,7 @@ func (d *Dataset) openChildren() (err error) { err = LastError() return } - for ci, _ := range d.Children { + 
for ci := range d.Children { if err = d.Children[ci].openChildren(); err != nil { return } @@ -57,11 +67,11 @@ func (d *Dataset) openChildren() (err error) { return } -// Recursive get handles to all available datasets on system +// DatasetOpenAll recursive get handles to all available datasets on system // (file-systems, volumes or snapshots). func DatasetOpenAll() (datasets []Dataset, err error) { var dataset Dataset - errcode := C.dataset_list_root(libzfs_handle, &dataset.list) + errcode := C.dataset_list_root(libzfsHandle, &dataset.list) for dataset.list != nil { dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh)) err = dataset.ReloadProperties() @@ -75,7 +85,7 @@ func DatasetOpenAll() (datasets []Dataset, err error) { err = LastError() return } - for ci, _ := range datasets { + for ci := range datasets { if err = datasets[ci].openChildren(); err != nil { return } @@ -83,24 +93,25 @@ func DatasetOpenAll() (datasets []Dataset, err error) { return } -// Close all datasets in slice and all of its recursive children datasets +// DatasetCloseAll close all datasets in slice and all of its recursive +// children datasets func DatasetCloseAll(datasets []Dataset) { for _, d := range datasets { d.Close() } } -// Open dataset and all of its recursive children datasets +// DatasetOpen open dataset and all of its recursive children datasets func DatasetOpen(path string) (d Dataset, err error) { d.list = C.create_dataset_list_item() - d.list.zh = C.zfs_open(libzfs_handle, C.CString(path), 0xF) + d.list.zh = C.zfs_open(libzfsHandle, C.CString(path), 0xF) if d.list.zh == nil { err = LastError() return } d.Type = DatasetType(C.zfs_get_type(d.list.zh)) - d.Properties = make(map[ZFSProp]Property) + d.Properties = make(map[Prop]Property) err = d.ReloadProperties() if err != nil { return @@ -109,7 +120,7 @@ func DatasetOpen(path string) (d Dataset, err error) { return } -func datasetPropertiesTo_nvlist(props map[ZFSProp]Property) ( +func datasetPropertiesTonvlist(props 
map[Prop]Property) ( cprops *C.nvlist_t, err error) { // convert properties to nvlist C type r := C.nvlist_alloc(&cprops, C.NV_UNIQUE_NAME, 0) @@ -129,16 +140,17 @@ func datasetPropertiesTo_nvlist(props map[ZFSProp]Property) ( return } -// Create a new filesystem or volume on path representing pool/dataset or pool/parent/dataset +// DatasetCreate create a new filesystem or volume on path representing +// pool/dataset or pool/parent/dataset func DatasetCreate(path string, dtype DatasetType, - props map[ZFSProp]Property) (d Dataset, err error) { + props map[Prop]Property) (d Dataset, err error) { var cprops *C.nvlist_t - if cprops, err = datasetPropertiesTo_nvlist(props); err != nil { + if cprops, err = datasetPropertiesTonvlist(props); err != nil { return } defer C.nvlist_free(cprops) - errcode := C.zfs_create(libzfs_handle, C.CString(path), + errcode := C.zfs_create(libzfsHandle, C.CString(path), C.zfs_type_t(dtype), cprops) if errcode != 0 { err = LastError() @@ -146,7 +158,8 @@ func DatasetCreate(path string, dtype DatasetType, return } -// Close dataset and all its recursive children datasets (close handle and cleanup dataset object/s from memory) +// Close close dataset and all its recursive children datasets (close handle +// and cleanup dataset object/s from memory) func (d *Dataset) Close() { if d.list != nil && d.list.zh != nil { C.dataset_list_close(d.list) @@ -156,7 +169,7 @@ func (d *Dataset) Close() { } } -// Destroys the dataset. The caller must make sure that the filesystem +// Destroy destroys the dataset. The caller must make sure that the filesystem // isn't mounted, and that there are no active dependents. Set Defer argument // to true to defer destruction for when dataset is not in use. 
func (d *Dataset) Destroy(Defer bool) (err error) { @@ -165,13 +178,13 @@ func (d *Dataset) Destroy(Defer bool) (err error) { if e != nil { return } - dsType, e := d.GetProperty(ZFSPropType) + dsType, e := d.GetProperty(DatasetPropType) err = errors.New("Cannot destroy dataset " + path + ": " + dsType.Value + " has children") return } if d.list != nil { - if ec := C.zfs_destroy(d.list.zh, boolean_t(Defer)); ec != 0 { + if ec := C.zfs_destroy(d.list.zh, booleanT(Defer)); ec != 0 { err = LastError() } } else { @@ -180,7 +193,7 @@ func (d *Dataset) Destroy(Defer bool) (err error) { return } -// Recursively destroy children of dataset and dataset. +// DestroyRecursive recursively destroy children of dataset and dataset. func (d *Dataset) DestroyRecursive() (err error) { if len(d.Children) > 0 { for _, c := range d.Children { @@ -197,6 +210,7 @@ func (d *Dataset) DestroyRecursive() (err error) { return } +// Pool returns pool dataset belongs to func (d *Dataset) Pool() (p Pool, err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) @@ -212,6 +226,7 @@ func (d *Dataset) Pool() (p Pool, err error) { return } +// ReloadProperties re-read dataset's properties func (d *Dataset) ReloadProperties() (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) @@ -220,8 +235,8 @@ func (d *Dataset) ReloadProperties() (err error) { var plist *C.property_list_t plist = C.new_property_list() defer C.free_properties(plist) - d.Properties = make(map[ZFSProp]Property) - for prop := ZFSPropType; prop < ZFSNumProps; prop++ { + d.Properties = make(map[Prop]Property) + for prop := DatasetPropType; prop < DatasetNumProps; prop++ { errcode := C.read_dataset_property(d.list.zh, plist, C.int(prop)) if errcode != 0 { continue @@ -232,9 +247,9 @@ func (d *Dataset) ReloadProperties() (err error) { return } -// Reload and return single specified property. This also reloads requested +// GetProperty reload and return single specified property. 
This also reloads requested // property in Properties map. -func (d *Dataset) GetProperty(p ZFSProp) (prop Property, err error) { +func (d *Dataset) GetProperty(p Prop) (prop Property, err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) return @@ -253,10 +268,10 @@ func (d *Dataset) GetProperty(p ZFSProp) (prop Property, err error) { return } -// Set ZFS dataset property to value. Not all properties can be set, +// SetProperty set ZFS dataset property to value. Not all properties can be set, // some can be set only at creation time and some are read only. // Always check if returned error and its description. -func (d *Dataset) SetProperty(p ZFSProp, value string) (err error) { +func (d *Dataset) SetProperty(p Prop, value string) (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) return @@ -273,15 +288,15 @@ func (d *Dataset) SetProperty(p ZFSProp, value string) (err error) { return } -// Clones the dataset. The target must be of the same type as +// Clone - clones the dataset. The target must be of the same type as // the source. -func (d *Dataset) Clone(target string, props map[ZFSProp]Property) (rd Dataset, err error) { +func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err error) { var cprops *C.nvlist_t if d.list == nil { err = errors.New(msgDatasetIsNil) return } - if cprops, err = datasetPropertiesTo_nvlist(props); err != nil { + if cprops, err = datasetPropertiesTonvlist(props); err != nil { return } defer C.nvlist_free(cprops) @@ -293,14 +308,14 @@ func (d *Dataset) Clone(target string, props map[ZFSProp]Property) (rd Dataset, return } -// Create dataset snapshot. Set recur to true to snapshot child datasets. -func DatasetSnapshot(path string, recur bool, props map[ZFSProp]Property) (rd Dataset, err error) { +// DatasetSnapshot create dataset snapshot. Set recur to true to snapshot child datasets. 
+func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Dataset, err error) { var cprops *C.nvlist_t - if cprops, err = datasetPropertiesTo_nvlist(props); err != nil { + if cprops, err = datasetPropertiesTonvlist(props); err != nil { return } defer C.nvlist_free(cprops) - if errc := C.zfs_snapshot(libzfs_handle, C.CString(path), boolean_t(recur), cprops); errc != 0 { + if errc := C.zfs_snapshot(libzfsHandle, C.CString(path), booleanT(recur), cprops); errc != 0 { err = LastError() return } @@ -308,7 +323,7 @@ func DatasetSnapshot(path string, recur bool, props map[ZFSProp]Property) (rd Da return } -// Return zfs dataset path/name +// Path return zfs dataset path/name func (d *Dataset) Path() (path string, err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) @@ -319,14 +334,14 @@ func (d *Dataset) Path() (path string, err error) { return } -// Rollabck dataset snapshot +// Rollback rollabck's dataset snapshot func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) return } if errc := C.zfs_rollback(d.list.zh, - snap.list.zh, boolean_t(force)); errc != 0 { + snap.list.zh, booleanT(force)); errc != 0 { err = LastError() } return @@ -334,20 +349,20 @@ func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { // Rename dataset func (d *Dataset) Rename(newname string, recur, - force_umount bool) (err error) { + forceUnmount bool) (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) return } if errc := C.zfs_rename(d.list.zh, C.CString(newname), - boolean_t(recur), boolean_t(force_umount)); errc != 0 { + booleanT(recur), booleanT(forceUnmount)); errc != 0 { err = LastError() } return } -// Checks to see if the mount is active. If the filesystem is mounted, fills -// in 'where' with the current mountpoint, and returns true. Otherwise, +// IsMounted checks to see if the mount is active. 
If the filesystem is mounted, +// sets in 'where' argument the current mountpoint, and returns true. Otherwise, // returns false. func (d *Dataset) IsMounted() (mounted bool, where string) { var cw *C.char @@ -386,7 +401,8 @@ func (d *Dataset) Unmount(flags int) (err error) { return } -// Unmount this filesystem and any children inheriting the mountpoint property. +// UnmountAll unmount this filesystem and any children inheriting the +// mountpoint property. func (d *Dataset) UnmountAll(flags int) (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) @@ -398,12 +414,12 @@ func (d *Dataset) UnmountAll(flags int) (err error) { return } -// Convert property to name +// DatasetPropertyToName convert property to name // ( returns built in string representation of property name). // This is optional, you can represent each property with string // name of choice. -func DatasetPropertyToName(p ZFSProp) (name string) { - if p == ZFSNumProps { +func DatasetPropertyToName(p Prop) (name string) { + if p == DatasetNumProps { return "numofprops" } prop := C.zfs_prop_t(p) diff --git a/zfs_test.go b/zfs_test.go index 2e4d833..a0d4412 100644 --- a/zfs_test.go +++ b/zfs_test.go @@ -2,15 +2,16 @@ package zfs_test import ( "fmt" - "github.com/bicomsystems/go-libzfs" "testing" + + "github.com/bicomsystems/go-libzfs" ) /* ------------------------------------------------------------------------- */ // HELPERS: -var TST_DATASET_PATH = TST_POOL_NAME + "/DATASET" -var TST_VOLUME_PATH = TST_DATASET_PATH + "/VOLUME" -var TST_DATASET_PATH_SNAP = TST_DATASET_PATH + "@test" +var TSTDatasetPath = TSTPoolName + "/DATASET" +var TSTVolumePath = TSTDatasetPath + "/VOLUME" +var TSTDatasetPathSnap = TSTDatasetPath + "@test" func printDatasets(ds []zfs.Dataset) error { for _, d := range ds { @@ -19,7 +20,7 @@ func printDatasets(ds []zfs.Dataset) error { if err != nil { return err } - p, err := d.GetProperty(zfs.ZFSPropType) + p, err := d.GetProperty(zfs.DatasetPropType) if err != nil { 
return err } @@ -36,45 +37,45 @@ func printDatasets(ds []zfs.Dataset) error { func zfsTestDatasetCreate(t *testing.T) { // reinit names used in case TESTPOOL was in conflict - TST_DATASET_PATH = TST_POOL_NAME + "/DATASET" - TST_VOLUME_PATH = TST_DATASET_PATH + "/VOLUME" - TST_DATASET_PATH_SNAP = TST_DATASET_PATH + "@test" + TSTDatasetPath = TSTPoolName + "/DATASET" + TSTVolumePath = TSTDatasetPath + "/VOLUME" + TSTDatasetPathSnap = TSTDatasetPath + "@test" - println("TEST DatasetCreate(", TST_DATASET_PATH, ") (filesystem) ... ") - props := make(map[zfs.ZFSProp]zfs.Property) - d, err := zfs.DatasetCreate(TST_DATASET_PATH, zfs.DatasetTypeFilesystem, props) + println("TEST DatasetCreate(", TSTDatasetPath, ") (filesystem) ... ") + props := make(map[zfs.Prop]zfs.Property) + d, err := zfs.DatasetCreate(TSTDatasetPath, zfs.DatasetTypeFilesystem, props) if err != nil { t.Error(err) return } d.Close() - println("PASS\n") + print("PASS\n\n") strSize := "536870912" // 512M - println("TEST DatasetCreate(", TST_VOLUME_PATH, ") (volume) ... ") - props[zfs.ZFSPropVolsize] = zfs.Property{Value: strSize} + println("TEST DatasetCreate(", TSTVolumePath, ") (volume) ... ") + props[zfs.DatasetPropVolsize] = zfs.Property{Value: strSize} // In addition I explicitly choose some more properties to be set. - props[zfs.ZFSPropVolblocksize] = zfs.Property{Value: "4096"} - props[zfs.ZFSPropReservation] = zfs.Property{Value: strSize} - d, err = zfs.DatasetCreate(TST_VOLUME_PATH, zfs.DatasetTypeVolume, props) + props[zfs.DatasetPropVolblocksize] = zfs.Property{Value: "4096"} + props[zfs.DatasetPropReservation] = zfs.Property{Value: strSize} + d, err = zfs.DatasetCreate(TSTVolumePath, zfs.DatasetTypeVolume, props) if err != nil { t.Error(err) return } d.Close() - println("PASS\n") + print("PASS\n\n") } func zfsTestDatasetOpen(t *testing.T) { - println("TEST DatasetOpen(", TST_DATASET_PATH, ") ... 
") - d, err := zfs.DatasetOpen(TST_DATASET_PATH) + println("TEST DatasetOpen(", TSTDatasetPath, ") ... ") + d, err := zfs.DatasetOpen(TSTDatasetPath) if err != nil { t.Error(err) return } d.Close() - println("PASS\n") + print("PASS\n\n") } func zfsTestDatasetOpenAll(t *testing.T) { @@ -90,24 +91,24 @@ func zfsTestDatasetOpenAll(t *testing.T) { return } zfs.DatasetCloseAll(ds) - println("PASS\n") + print("PASS\n\n") } func zfsTestDatasetSnapshot(t *testing.T) { - println("TEST DatasetSnapshot(", TST_DATASET_PATH, ", true, ...) ... ") - props := make(map[zfs.ZFSProp]zfs.Property) - d, err := zfs.DatasetSnapshot(TST_DATASET_PATH_SNAP, true, props) + println("TEST DatasetSnapshot(", TSTDatasetPath, ", true, ...) ... ") + props := make(map[zfs.Prop]zfs.Property) + d, err := zfs.DatasetSnapshot(TSTDatasetPathSnap, true, props) if err != nil { t.Error(err) return } defer d.Close() - println("PASS\n") + print("PASS\n\n") } func zfsTestDatasetDestroy(t *testing.T) { - println("TEST DATASET Destroy( ", TST_DATASET_PATH, " ) ... ") - d, err := zfs.DatasetOpen(TST_DATASET_PATH) + println("TEST DATASET Destroy( ", TSTDatasetPath, " ) ... ") + d, err := zfs.DatasetOpen(TSTDatasetPath) if err != nil { t.Error(err) return @@ -117,7 +118,7 @@ func zfsTestDatasetDestroy(t *testing.T) { t.Error(err) return } - println("PASS\n") + print("PASS\n\n") } /* ------------------------------------------------------------------------- */ @@ -128,7 +129,7 @@ func ExampleDatasetCreate() { // Create map to represent ZFS dataset properties. This is equivalent to // list of properties you can get from ZFS CLI tool, and some more // internally used by libzfs. - props := make(map[zfs.ZFSProp]zfs.Property) + props := make(map[zfs.Prop]zfs.Property) // I choose to create (block) volume 1GiB in size. Size is just ZFS dataset // property and this is done as map of strings. 
So, You have to either @@ -136,10 +137,10 @@ func ExampleDatasetCreate() { // similar to convert in to string (base 10) from numeric type. strSize := "1073741824" - props[zfs.ZFSPropVolsize] = zfs.Property{Value: strSize} + props[zfs.DatasetPropVolsize] = zfs.Property{Value: strSize} // In addition I explicitly choose some more properties to be set. - props[zfs.ZFSPropVolblocksize] = zfs.Property{Value: "4096"} - props[zfs.ZFSPropReservation] = zfs.Property{Value: strSize} + props[zfs.DatasetPropVolblocksize] = zfs.Property{Value: "4096"} + props[zfs.DatasetPropReservation] = zfs.Property{Value: strSize} // Lets create desired volume d, err := zfs.DatasetCreate("TESTPOOL/VOLUME1", zfs.DatasetTypeVolume, props) @@ -161,10 +162,11 @@ func ExampleDatasetOpen() { } defer d.Close() var p zfs.Property - if p, err = d.GetProperty(zfs.ZFSPropAvailable); err != nil { + if p, err = d.GetProperty(zfs.DatasetPropAvailable); err != nil { panic(err.Error()) } - println(zfs.DatasetPropertyToName(zfs.ZFSPropAvailable), " = ", p.Value) + println(zfs.DatasetPropertyToName(zfs.DatasetPropAvailable), " = ", + p.Value) } func ExampleDatasetOpenAll() { @@ -180,7 +182,7 @@ func ExampleDatasetOpenAll() { if err != nil { panic(err.Error()) } - p, err := d.GetProperty(zfs.ZFSPropType) + p, err := d.GetProperty(zfs.DatasetPropType) if err != nil { panic(err.Error()) } diff --git a/zpool.go b/zpool.go index 0fb98ba..3f3fe52 100644 --- a/zpool.go +++ b/zpool.go @@ -10,16 +10,17 @@ import ( "errors" "fmt" "strconv" + "time" ) const ( msgPoolIsNil = "Pool handle not initialized or its closed" ) -type PoolProperties map[PoolProp]string -type ZFSProperties map[ZFSProp]string +// PoolProperties type is map of pool properties name -> value +type PoolProperties map[Prop]string -// Object represents handler to single ZFS pool +// Pool object represents handler to single ZFS pool // /* Pool.Properties map[string]Property */ @@ -34,11 +35,11 @@ type Pool struct { Features map[string]string } -// Open 
ZFS pool handler by name. +// PoolOpen open ZFS pool handler by name. // Returns Pool object, requires Pool.Close() to be called explicitly // for memory cleanup after object is not needed anymore. func PoolOpen(name string) (pool Pool, err error) { - pool.list = C.zpool_list_open(libzfs_handle, C.CString(name)) + pool.list = C.zpool_list_open(libzfsHandle, C.CString(name)) if pool.list != nil { err = pool.ReloadProperties() return @@ -47,7 +48,7 @@ func PoolOpen(name string) (pool Pool, err error) { return } -// Given a list of directories to search, find and import pool with matching +// PoolImport given a list of directories to search, find and import pool with matching // name stored on disk. func PoolImport(name string, searchpaths []string) (pool Pool, err error) { errPoolList := errors.New("Failed to list pools") @@ -59,7 +60,7 @@ func PoolImport(name string, searchpaths []string) (pool Pool, err error) { C.strings_setat(cpaths, C.int(i), C.CString(path)) } - pools := C.zpool_find_import(libzfs_handle, C.int(numofp), cpaths) + pools := C.zpool_find_import(libzfsHandle, C.int(numofp), cpaths) defer C.nvlist_free(pools) elem = C.nvlist_next_nvpair(pools, elem) @@ -88,7 +89,7 @@ func PoolImport(name string, searchpaths []string) (pool Pool, err error) { return } - retcode := C.zpool_import(libzfs_handle, config, C.CString(name), nil) + retcode := C.zpool_import(libzfsHandle, config, C.CString(name), nil) if retcode != 0 { err = LastError() return @@ -97,12 +98,12 @@ func PoolImport(name string, searchpaths []string) (pool Pool, err error) { return } -// Open all active ZFS pools on current system. +// PoolOpenAll open all active ZFS pools on current system. // Returns array of Pool handlers, each have to be closed after not needed // anymore. Call Pool.Close() method. 
func PoolOpenAll() (pools []Pool, err error) { var pool Pool - errcode := C.zpool_list(libzfs_handle, &pool.list) + errcode := C.zpool_list(libzfsHandle, &pool.list) for pool.list != nil { err = pool.ReloadProperties() if err != nil { @@ -117,17 +118,18 @@ func PoolOpenAll() (pools []Pool, err error) { return } +// PoolCloseAll close all pools in given slice func PoolCloseAll(pools []Pool) { for _, p := range pools { p.Close() } } -// Convert property to name +// PoolPropertyToName convert property to name // ( returns built in string representation of property name). // This is optional, you can represent each property with string // name of choice. -func PoolPropertyToName(p PoolProp) (name string) { +func PoolPropertyToName(p Prop) (name string) { if p == PoolNumProps { return "numofprops" } @@ -136,15 +138,15 @@ func PoolPropertyToName(p PoolProp) (name string) { return } -// Map POOL STATE to string. +// PoolStateToName maps POOL STATE to string. func PoolStateToName(state PoolState) (name string) { ps := C.pool_state_t(state) name = C.GoString(C.zpool_pool_state_to_name(ps)) return } -// Re-read ZFS pool properties and features, refresh Pool.Properties and -// Pool.Features map +// ReloadProperties re-read ZFS pool properties and features, refresh +// Pool.Properties and Pool.Features map func (pool *Pool) ReloadProperties() (err error) { propList := C.read_zpool_properties(pool.list.zph) if propList == nil { @@ -165,15 +167,15 @@ func (pool *Pool) ReloadProperties() (err error) { "async_destroy": "disabled", "empty_bpobj": "disabled", "lz4_compress": "disabled"} - for name, _ := range pool.Features { + for name := range pool.Features { pool.GetFeature(name) } return } -// Reload and return single specified property. This also reloads requested +// GetProperty reload and return single specified property. This also reloads requested // property in Properties map. 
-func (pool *Pool) GetProperty(p PoolProp) (prop Property, err error) { +func (pool *Pool) GetProperty(p Prop) (prop Property, err error) { if pool.list != nil { // First check if property exist at all if p < PoolPropName || p > PoolNumProps { @@ -194,7 +196,7 @@ func (pool *Pool) GetProperty(p PoolProp) (prop Property, err error) { return prop, errors.New(msgPoolIsNil) } -// Reload and return single specified feature. This also reloads requested +// GetFeature reload and return single specified feature. This also reloads requested // feature in Features map. func (pool *Pool) GetFeature(name string) (value string, err error) { var fvalue [512]C.char @@ -209,10 +211,10 @@ func (pool *Pool) GetFeature(name string) (value string, err error) { return } -// Set ZFS pool property to value. Not all properties can be set, +// SetProperty set ZFS pool property to value. Not all properties can be set, // some can be set only at creation time and some are read only. // Always check if returned error and its description. 
-func (pool *Pool) SetProperty(p PoolProp, value string) (err error) { +func (pool *Pool) SetProperty(p Prop, value string) (err error) { if pool.list != nil { // First check if property exist at all if p < PoolPropName || p > PoolNumProps { @@ -241,7 +243,7 @@ func (pool *Pool) Close() { pool.list = nil } -// Get (re-read) ZFS pool name property +// Name get (re-read) ZFS pool name property func (pool *Pool) Name() (name string, err error) { if pool.list == nil { err = errors.New(msgPoolIsNil) @@ -252,7 +254,7 @@ func (pool *Pool) Name() (name string, err error) { return } -// Get ZFS pool state +// State get ZFS pool state // Return the state of the pool (ACTIVE or UNAVAILABLE) func (pool *Pool) State() (state PoolState, err error) { if pool.list == nil { @@ -263,7 +265,7 @@ func (pool *Pool) State() (state PoolState, err error) { return } -// ZFS virtual device specification +// VDevSpec ZFS virtual device specification type VDevSpec struct { Type VDevType Devices []VDevSpec // groups other devices (e.g. 
mirror) @@ -271,31 +273,31 @@ type VDevSpec struct { Path string } -func (self *VDevSpec) isGrouping() (grouping bool, mindevs, maxdevs int) { +func (vdev *VDevSpec) isGrouping() (grouping bool, mindevs, maxdevs int) { maxdevs = int(^uint(0) >> 1) - if self.Type == VDevTypeRaidz { + if vdev.Type == VDevTypeRaidz { grouping = true - if self.Parity == 0 { - self.Parity = 1 + if vdev.Parity == 0 { + vdev.Parity = 1 } - if self.Parity > 254 { - self.Parity = 254 + if vdev.Parity > 254 { + vdev.Parity = 254 } - mindevs = int(self.Parity) + 1 + mindevs = int(vdev.Parity) + 1 maxdevs = 255 - } else if self.Type == VDevTypeMirror { + } else if vdev.Type == VDevTypeMirror { grouping = true mindevs = 2 - } else if self.Type == VDevTypeLog || self.Type == VDevTypeSpare || self.Type == VDevTypeL2cache { + } else if vdev.Type == VDevTypeLog || vdev.Type == VDevTypeSpare || vdev.Type == VDevTypeL2cache { grouping = true mindevs = 1 } return } -func (self *VDevSpec) isLog() (r C.uint64_t) { +func (vdev *VDevSpec) isLog() (r C.uint64_t) { r = 0 - if self.Type == VDevTypeLog { + if vdev.Type == VDevTypeLog { r = 1 } return @@ -317,7 +319,7 @@ func toCPoolProperties(props PoolProperties) (cprops *C.nvlist_t) { return } -func toCZFSProperties(props ZFSProperties) (cprops *C.nvlist_t) { +func toCDatasetProperties(props DatasetProperties) (cprops *C.nvlist_t) { cprops = nil for prop, value := range props { name := C.zfs_prop_to_name(C.zfs_prop_t(prop)) @@ -361,7 +363,7 @@ func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec, defer C.nvlist_free_array(l2cache) for i, vdev := range vdevs { grouping, mindevs, maxdevs := vdev.isGrouping() - var child *C.nvlist_t = nil + var child *C.nvlist_t // fmt.Println(vdev.Type) if r := C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 { err = errors.New("Failed to allocate vdev") @@ -369,8 +371,9 @@ func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec, } vcount := len(vdev.Devices) if vcount < mindevs || vcount > 
maxdevs { - err = errors.New(fmt.Sprintf( - "Invalid vdev specification: %s supports no less than %d or more than %d devices", vdev.Type, mindevs, maxdevs)) + err = fmt.Errorf( + "Invalid vdev specification: %s supports no less than %d or more than %d devices", + vdev.Type, mindevs, maxdevs) return } if r := C.nvlist_add_string(child, C.CString(C.ZPOOL_CONFIG_TYPE), @@ -466,11 +469,11 @@ func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec, return } -// Create ZFS pool per specs, features and properties of pool and root dataset +// PoolCreate create ZFS pool per specs, features and properties of pool and root dataset func PoolCreate(name string, vdevs []VDevSpec, features map[string]string, - props PoolProperties, fsprops ZFSProperties) (pool Pool, err error) { + props PoolProperties, fsprops DatasetProperties) (pool Pool, err error) { // create root vdev nvroot - var nvroot *C.nvlist_t = nil + var nvroot *C.nvlist_t if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 { err = errors.New("Failed to allocate root vdev") return @@ -495,7 +498,7 @@ func PoolCreate(name string, vdevs []VDevSpec, features map[string]string, err = errors.New("Failed to allocate pool properties") return } - cfsprops := toCZFSProperties(fsprops) + cfsprops := toCDatasetProperties(fsprops) if cfsprops != nil { defer C.nvlist_free(cfsprops) } else if len(fsprops) > 0 { @@ -516,16 +519,34 @@ func PoolCreate(name string, vdevs []VDevSpec, features map[string]string, } // Create actual pool then open - if r := C.zpool_create(libzfs_handle, C.CString(name), nvroot, + if r := C.zpool_create(libzfsHandle, C.CString(name), nvroot, cprops, cfsprops); r != 0 { err = LastError() + err = errors.New(err.Error() + " (zpool_create)") return } - pool, err = PoolOpen(name) + + // It can happen that pool is not immediately available, + // we know we just created it with success so lets wait and retry + // but only in case EZFS_NOENT error + retr := 0 + for pool, err = PoolOpen(name); 
err != nil && retr < 3; retr++ { + errno := C.libzfs_errno(libzfsHandle) + if errno == ENoent { + time.Sleep(500 * time.Millisecond) + } else { + err = errors.New(err.Error() + " (PoolOpen)") + return + } + pool, err = PoolOpen(name) + } + if err != nil { + err = errors.New(err.Error() + " (PoolOpen)") + } return } -// Get pool status. Let you check if pool healthy. +// Status get pool status. Let you check if pool healthy. func (pool *Pool) Status() (status PoolStatus, err error) { var msgid *C.char var reason C.zpool_status_t @@ -554,22 +575,22 @@ func (pool *Pool) Destroy(logStr string) (err error) { return } -// Exports the pool from the system. +// Export exports the pool from the system. // Before exporting the pool, all datasets within the pool are unmounted. // A pool can not be exported if it has a shared spare that is currently // being used. func (pool *Pool) Export(force bool, log string) (err error) { - var force_t C.boolean_t = 0 + var forcet C.boolean_t if force { - force_t = 1 + forcet = 1 } - if rc := C.zpool_export(pool.list.zph, force_t, C.CString(log)); rc != 0 { + if rc := C.zpool_export(pool.list.zph, forcet, C.CString(log)); rc != 0 { err = LastError() } return } -// Hard force +// ExportForce hard force export of the pool from the system. 
func (pool *Pool) ExportForce(log string) (err error) { if rc := C.zpool_export_force(pool.list.zph, C.CString(log)); rc != 0 { err = LastError() diff --git a/zpool_test.go b/zpool_test.go index 5cc3079..cc27780 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -2,16 +2,17 @@ package zfs_test import ( "fmt" - "github.com/bicomsystems/go-libzfs" "io/ioutil" "os" "testing" + + "github.com/bicomsystems/go-libzfs" ) /* ------------------------------------------------------------------------- */ // HELPERS: -var TST_POOL_NAME = "TESTPOOL" +var TSTPoolName = "TESTPOOL" func CreateTmpSparse(prefix string, size int64) (path string, err error) { sf, err := ioutil.TempFile("/tmp", prefix) @@ -66,12 +67,12 @@ func zpoolTestPoolCreate(t *testing.T) { // first check if pool with same name already exist // we don't want conflict for { - p, err := zfs.PoolOpen(TST_POOL_NAME) + p, err := zfs.PoolOpen(TSTPoolName) if err != nil { break } p.Close() - TST_POOL_NAME += "0" + TSTPoolName += "0" } var err error @@ -94,15 +95,15 @@ func zpoolTestPoolCreate(t *testing.T) { zfs.VDevSpec{Type: zfs.VDevTypeSpare, Devices: sdevs}, } - props := make(map[zfs.PoolProp]string) - fsprops := make(map[zfs.ZFSProp]string) + props := make(map[zfs.Prop]string) + fsprops := make(map[zfs.Prop]string) features := make(map[string]string) - fsprops[zfs.ZFSPropMountpoint] = "none" + fsprops[zfs.DatasetPropMountpoint] = "none" features["async_destroy"] = "enabled" features["empty_bpobj"] = "enabled" features["lz4_compress"] = "enabled" - pool, err := zfs.PoolCreate(TST_POOL_NAME, vdevs, features, props, fsprops) + pool, err := zfs.PoolCreate(TSTPoolName, vdevs, features, props, fsprops) if err != nil { t.Error(err) // try cleanup @@ -113,7 +114,7 @@ func zpoolTestPoolCreate(t *testing.T) { } defer pool.Close() - println("PASS\n") + print("PASS\n\n") } // Open and list all pools and them state on the system @@ -143,22 +144,22 @@ func zpoolTestPoolOpenAll(t *testing.T) { println("\tPool: ", pname, " state: ", 
pstate) p.Close() } - println("PASS\n") + print("PASS\n\n") } func zpoolTestPoolDestroy(t *testing.T) { - println("TEST POOL Destroy( ", TST_POOL_NAME, " ) ... ") - p, err := zfs.PoolOpen(TST_POOL_NAME) + println("TEST POOL Destroy( ", TSTPoolName, " ) ... ") + p, err := zfs.PoolOpen(TSTPoolName) if err != nil { t.Error(err) return } defer p.Close() - if err = p.Destroy("Test of pool destroy (" + TST_POOL_NAME + ")"); err != nil { + if err = p.Destroy("Test of pool destroy (" + TSTPoolName + ")"); err != nil { t.Error(err.Error()) return } - println("PASS\n") + print("PASS\n\n") } func zpoolTestFailPoolOpen(t *testing.T) { @@ -166,7 +167,7 @@ func zpoolTestFailPoolOpen(t *testing.T) { pname := "fail to open this pool" p, err := zfs.PoolOpen(pname) if err != nil { - println("PASS\n") + print("PASS\n\n") return } t.Error("PoolOpen pass when it should fail") @@ -174,43 +175,43 @@ func zpoolTestFailPoolOpen(t *testing.T) { } func zpoolTestExport(t *testing.T) { - println("TEST POOL Export( ", TST_POOL_NAME, " ) ... ") - p, err := zfs.PoolOpen(TST_POOL_NAME) + println("TEST POOL Export( ", TSTPoolName, " ) ... ") + p, err := zfs.PoolOpen(TSTPoolName) if err != nil { t.Error(err) return } p.Export(false, "Test exporting pool") defer p.Close() - println("PASS\n") + print("PASS\n\n") } func zpoolTestExportForce(t *testing.T) { - println("TEST POOL ExportForce( ", TST_POOL_NAME, " ) ... ") - p, err := zfs.PoolOpen(TST_POOL_NAME) + println("TEST POOL ExportForce( ", TSTPoolName, " ) ... ") + p, err := zfs.PoolOpen(TSTPoolName) if err != nil { t.Error(err) return } p.ExportForce("Test force exporting pool") defer p.Close() - println("PASS\n") + print("PASS\n\n") } func zpoolTestImport(t *testing.T) { - println("TEST POOL Import( ", TST_POOL_NAME, " ) ... ") - p, err := zfs.PoolImport(TST_POOL_NAME, []string{"/tmp"}) + println("TEST POOL Import( ", TSTPoolName, " ) ... 
") + p, err := zfs.PoolImport(TSTPoolName, []string{"/tmp"}) if err != nil { t.Error(err) return } defer p.Close() - println("PASS\n") + print("PASS\n\n") } func zpoolTestPoolProp(t *testing.T) { - println("TEST PoolProp on ", TST_POOL_NAME, " ... ") - if pool, err := zfs.PoolOpen(TST_POOL_NAME); err == nil { + println("TEST PoolProp on ", TSTPoolName, " ... ") + if pool, err := zfs.PoolOpen(TSTPoolName); err == nil { defer pool.Close() // Turn on snapshot listing for pool pool.SetProperty(zfs.PoolPropListsnaps, "on") @@ -247,12 +248,12 @@ func zpoolTestPoolProp(t *testing.T) { t.Error(err) return } - println("PASS\n") + print("PASS\n\n") } func zpoolTestPoolStatusAndState(t *testing.T) { - println("TEST pool Status/State ( ", TST_POOL_NAME, " ) ... ") - pool, err := zfs.PoolOpen(TST_POOL_NAME) + println("TEST pool Status/State ( ", TSTPoolName, " ) ... ") + pool, err := zfs.PoolOpen(TSTPoolName) if err != nil { t.Error(err.Error()) return @@ -269,9 +270,9 @@ func zpoolTestPoolStatusAndState(t *testing.T) { t.Error(err.Error()) return } - println("POOL", TST_POOL_NAME, "state:", zfs.PoolStateToName(pstate)) + println("POOL", TSTPoolName, "state:", zfs.PoolStateToName(pstate)) - println("PASS\n") + print("PASS\n\n") } /* ------------------------------------------------------------------------- */ @@ -315,7 +316,7 @@ func ExamplePoolOpenAll() { // Iterate pool properties and print name, value and source for key, prop := range p.Properties { - pkey := zfs.PoolProp(key) + pkey := zfs.Prop(key) if pkey == zfs.PoolPropName { continue // Skip name its already printed above } @@ -353,14 +354,14 @@ func ExamplePoolCreate() { } // pool properties - props := make(map[zfs.PoolProp]string) + props := make(map[zfs.Prop]string) // root dataset filesystem properties - fsprops := make(map[zfs.ZFSProp]string) + fsprops := make(map[zfs.Prop]string) // pool features features := make(map[string]string) // Turn off auto mounting by ZFS - fsprops[zfs.ZFSPropMountpoint] = "none" + 
fsprops[zfs.DatasetPropMountpoint] = "none" // Enable some features features["async_destroy"] = "enabled" From 4f32480fa07650bf7f05ecfe6ea18267e64f4314 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Sun, 6 Dec 2015 21:54:43 +0100 Subject: [PATCH 15/36] - Implemented PoolImportByGUID and renamed VDevSpec to VDevTree --- a_test.go | 2 +- zpool.go | 129 ++++++++++++++++++++++++++++++++++++-------------- zpool_test.go | 39 ++++++++++----- 3 files changed, 121 insertions(+), 49 deletions(-) diff --git a/a_test.go b/a_test.go index d5b8204..568c34b 100644 --- a/a_test.go +++ b/a_test.go @@ -12,7 +12,7 @@ func Test(t *testing.T) { zpoolTestExport(t) zpoolTestImport(t) zpoolTestExportForce(t) - zpoolTestImport(t) + zpoolTestImportByGUID(t) zpoolTestPoolProp(t) zpoolTestPoolStatusAndState(t) zpoolTestPoolOpenAll(t) diff --git a/zpool.go b/zpool.go index 3f3fe52..c63cc62 100644 --- a/zpool.go +++ b/zpool.go @@ -20,6 +20,14 @@ const ( // PoolProperties type is map of pool properties name -> value type PoolProperties map[Prop]string +// VDevTree ZFS virtual device tree +type VDevTree struct { + Type VDevType + Devices []VDevTree // groups other devices (e.g. mirror) + Parity uint + Path string +} + // Pool object represents handler to single ZFS pool // /* Pool.Properties map[string]Property @@ -48,12 +56,13 @@ func PoolOpen(name string) (pool Pool, err error) { return } -// PoolImport given a list of directories to search, find and import pool with matching -// name stored on disk. 
-func PoolImport(name string, searchpaths []string) (pool Pool, err error) { +func poolSearchImport(q string, searchpaths []string, guid bool) (name string, + err error) { + var config *C.nvlist_t + var cname *C.char + config = nil errPoolList := errors.New("Failed to list pools") var elem *C.nvpair_t - var config *C.nvlist_t numofp := len(searchpaths) cpaths := C.alloc_strings(C.int(numofp)) for i, path := range searchpaths { @@ -65,39 +74,87 @@ func PoolImport(name string, searchpaths []string) (pool Pool, err error) { elem = C.nvlist_next_nvpair(pools, elem) for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { - var cname *C.char + var cq *C.char var tconfig *C.nvlist_t retcode := C.nvpair_value_nvlist(elem, &tconfig) if retcode != 0 { err = errPoolList return } - retcode = C.nvlist_lookup_string(tconfig, - C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cname) - if retcode != 0 { - err = errPoolList - return - } - oname := C.GoString(cname) - if name == oname { - config = tconfig - break + if guid { + var iguid C.uint64_t + if retcode = C.nvlist_lookup_uint64(tconfig, + C.CString(C.ZPOOL_CONFIG_POOL_GUID), &iguid); retcode != 0 { + err = errPoolList + return + } + sguid := fmt.Sprint(iguid) + if q == sguid { + config = tconfig + break + } + } else { + if retcode = C.nvlist_lookup_string(tconfig, + C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cq); retcode != 0 { + err = errPoolList + return + } + cname = cq + name = C.GoString(cq) + if q == name { + config = tconfig + break + } } } if config == nil { - err = errors.New("No pools to import found with name " + name) + err = fmt.Errorf("No pool found %s", q) return } - - retcode := C.zpool_import(libzfsHandle, config, C.CString(name), nil) - if retcode != 0 { + if guid { + // We need to get name so we can open pool by name + if retcode := C.nvlist_lookup_string(config, + C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cname); retcode != 0 { + err = errPoolList + return + } + name = C.GoString(cname) + } + if retcode := 
C.zpool_import(libzfsHandle, config, cname, + nil); retcode != 0 { err = LastError() return } + return +} + +// PoolImport given a list of directories to search, find and import pool with matching +// name stored on disk. +func PoolImport(name string, searchpaths []string) (pool Pool, err error) { + _, err = poolSearchImport(name, searchpaths, false) + if err != nil { + return + } pool, err = PoolOpen(name) return } +// PoolImportByGUID given a list of directories to search, find and import pool +// with matching GUID stored on disk. +func PoolImportByGUID(guid string, searchpaths []string) (pool Pool, err error) { + var name string + name, err = poolSearchImport(guid, searchpaths, true) + if err != nil { + return + } + pool, err = PoolOpen(name) + return +} + +// func PoolList(paths []string, cache string) (pools []Pool, err error) { +// +// } + // PoolOpenAll open all active ZFS pools on current system. // Returns array of Pool handlers, each have to be closed after not needed // anymore. Call Pool.Close() method. @@ -164,9 +221,17 @@ func (pool *Pool) ReloadProperties() (err error) { // read features pool.Features = map[string]string{ - "async_destroy": "disabled", - "empty_bpobj": "disabled", - "lz4_compress": "disabled"} + "async_destroy": "disabled", + "empty_bpobj": "disabled", + "lz4_compress": "disabled", + "spacemap_histogram": "disabled", + "enabled_txg": "disabled", + "hole_birth": "disabled", + "extensible_dataset": "disabled", + "embedded_data": "disabled", + "bookmarks": "disabled", + "filesystem_limits": "disabled", + "large_blocks": "disabled"} for name := range pool.Features { pool.GetFeature(name) } @@ -265,15 +330,7 @@ func (pool *Pool) State() (state PoolState, err error) { return } -// VDevSpec ZFS virtual device specification -type VDevSpec struct { - Type VDevType - Devices []VDevSpec // groups other devices (e.g. 
mirror) - Parity uint - Path string -} - -func (vdev *VDevSpec) isGrouping() (grouping bool, mindevs, maxdevs int) { +func (vdev *VDevTree) isGrouping() (grouping bool, mindevs, maxdevs int) { maxdevs = int(^uint(0) >> 1) if vdev.Type == VDevTypeRaidz { grouping = true @@ -295,7 +352,7 @@ func (vdev *VDevSpec) isGrouping() (grouping bool, mindevs, maxdevs int) { return } -func (vdev *VDevSpec) isLog() (r C.uint64_t) { +func (vdev *VDevTree) isLog() (r C.uint64_t) { r = 0 if vdev.Type == VDevTypeLog { r = 1 @@ -335,7 +392,7 @@ func toCDatasetProperties(props DatasetProperties) (cprops *C.nvlist_t) { return } -func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec, +func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, props PoolProperties) (err error) { count := len(vdevs) if count == 0 { @@ -396,7 +453,7 @@ func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec, return } } - if err = buildVDevSpec(child, vdev.Type, vdev.Devices, + if err = buildVDevTree(child, vdev.Type, vdev.Devices, props); err != nil { return } @@ -470,7 +527,7 @@ func buildVDevSpec(root *C.nvlist_t, rtype VDevType, vdevs []VDevSpec, } // PoolCreate create ZFS pool per specs, features and properties of pool and root dataset -func PoolCreate(name string, vdevs []VDevSpec, features map[string]string, +func PoolCreate(name string, vdevs []VDevTree, features map[string]string, props PoolProperties, fsprops DatasetProperties) (pool Pool, err error) { // create root vdev nvroot var nvroot *C.nvlist_t @@ -486,7 +543,7 @@ func PoolCreate(name string, vdevs []VDevSpec, features map[string]string, defer C.nvlist_free(nvroot) // Now we need to build specs (vdev hierarchy) - if err = buildVDevSpec(nvroot, VDevTypeRoot, vdevs, props); err != nil { + if err = buildVDevTree(nvroot, VDevTypeRoot, vdevs, props); err != nil { return } diff --git a/zpool_test.go b/zpool_test.go index cc27780..f3406f5 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -13,6 +13,7 @@ 
import ( // HELPERS: var TSTPoolName = "TESTPOOL" +var TSTPoolGUID string func CreateTmpSparse(prefix string, size int64) (path string, err error) { sf, err := ioutil.TempFile("/tmp", prefix) @@ -83,16 +84,16 @@ func zpoolTestPoolCreate(t *testing.T) { disks := [2]string{s1path, s2path} - var vdevs, mdevs, sdevs []zfs.VDevSpec + var vdevs, mdevs, sdevs []zfs.VDevTree for _, d := range disks { mdevs = append(mdevs, - zfs.VDevSpec{Type: zfs.VDevTypeFile, Path: d}) + zfs.VDevTree{Type: zfs.VDevTypeFile, Path: d}) } - sdevs = []zfs.VDevSpec{ + sdevs = []zfs.VDevTree{ {Type: zfs.VDevTypeFile, Path: s3path}} - vdevs = []zfs.VDevSpec{ - zfs.VDevSpec{Type: zfs.VDevTypeMirror, Devices: mdevs}, - zfs.VDevSpec{Type: zfs.VDevTypeSpare, Devices: sdevs}, + vdevs = []zfs.VDevTree{ + zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs}, + zfs.VDevTree{Type: zfs.VDevTypeSpare, Devices: sdevs}, } props := make(map[zfs.Prop]string) @@ -114,6 +115,9 @@ func zpoolTestPoolCreate(t *testing.T) { } defer pool.Close() + pguid, _ := pool.GetProperty(zfs.PoolPropGUID) + TSTPoolGUID = pguid.Value + print("PASS\n\n") } @@ -209,6 +213,17 @@ func zpoolTestImport(t *testing.T) { print("PASS\n\n") } +func zpoolTestImportByGUID(t *testing.T) { + println("TEST POOL ImportByGUID( ", TSTPoolGUID, " ) ... ") + p, err := zfs.PoolImportByGUID(TSTPoolGUID, []string{"/tmp"}) + if err != nil { + t.Error(err) + return + } + defer p.Close() + print("PASS\n\n") +} + func zpoolTestPoolProp(t *testing.T) { println("TEST PoolProp on ", TSTPoolName, " ... 
") if pool, err := zfs.PoolOpen(TSTPoolName); err == nil { @@ -335,22 +350,22 @@ func ExamplePoolOpenAll() { func ExamplePoolCreate() { disks := [2]string{"/dev/disk/by-id/ATA-123", "/dev/disk/by-id/ATA-456"} - var vdevs, mdevs, sdevs []zfs.VDevSpec + var vdevs, mdevs, sdevs []zfs.VDevTree // build mirror devices specs for _, d := range disks { mdevs = append(mdevs, - zfs.VDevSpec{Type: zfs.VDevTypeDisk, Path: d}) + zfs.VDevTree{Type: zfs.VDevTypeDisk, Path: d}) } // spare device specs - sdevs = []zfs.VDevSpec{ + sdevs = []zfs.VDevTree{ {Type: zfs.VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}} // pool specs - vdevs = []zfs.VDevSpec{ - zfs.VDevSpec{Type: zfs.VDevTypeMirror, Devices: mdevs}, - zfs.VDevSpec{Type: zfs.VDevTypeSpare, Devices: sdevs}, + vdevs = []zfs.VDevTree{ + zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs}, + zfs.VDevTree{Type: zfs.VDevTypeSpare, Devices: sdevs}, } // pool properties From b49a2715c29d8320a1220dde5485a869501fb891 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 10 Dec 2015 21:29:39 +0100 Subject: [PATCH 16/36] - Implemented: Search pools available to import but not imported AND Fetch imported pool's current vdev tree --- a_test.go | 2 + common.go | 40 ++++++ zpool.c | 22 +++- zpool.go | 333 ++++++++++++++++++++++++++++++++++++++++++++++---- zpool.h | 7 ++ zpool_test.go | 60 ++++++++- 6 files changed, 438 insertions(+), 26 deletions(-) diff --git a/a_test.go b/a_test.go index 568c34b..28f9aad 100644 --- a/a_test.go +++ b/a_test.go @@ -9,7 +9,9 @@ import ( func Test(t *testing.T) { zpoolTestPoolCreate(t) + zpoolTestPoolVDevTree(t) zpoolTestExport(t) + zpoolTestPoolImportSearch(t) zpoolTestImport(t) zpoolTestExportForce(t) zpoolTestImportByGUID(t) diff --git a/common.go b/common.go index d89a3e5..002a2b5 100644 --- a/common.go +++ b/common.go @@ -57,6 +57,12 @@ type PoolStatus int // PoolState type representing pool state type PoolState uint64 +// VDevState - vdev states tye +type VDevState uint64 + +// VDevAux - vdev aux 
states +type VDevAux uint64 + // Property ZFS pool or dataset property value type Property struct { Value string @@ -346,3 +352,37 @@ const ( EPoolreadonly /* pool is in read-only mode */ EUnknown ) + +// vdev states are ordered from least to most healthy. +// A vdev that's VDevStateCantOpen or below is considered unusable. +const ( + VDevStateUnknown VDevState = iota // Uninitialized vdev + VDevStateClosed // Not currently open + VDevStateOffline // Not allowed to open + VDevStateRemoved // Explicitly removed from system + VDevStateCantOpen // Tried to open, but failed + VDevStateFaulted // External request to fault device + VDevStateDegraded // Replicated vdev with unhealthy kids + VDevStateHealthy // Presumed good +) + +// vdev aux states. When a vdev is in the VDevStateCantOpen state, the aux field +// of the vdev stats structure uses these constants to distinguish why. +const ( + VDevAuxNone VDevAux = iota // no error + VDevAuxOpenFailed // ldi_open_*() or vn_open() failed + VDevAuxCorruptData // bad label or disk contents + VDevAuxNoReplicas // insufficient number of replicas + VDevAuxBadGUIDSum // vdev guid sum doesn't match + VDevAuxTooSmall // vdev size is too small + VDevAuxBadLabel // the label is OK but invalid + VDevAuxVersionNewer // on-disk version is too new + VDevAuxVersionOlder // on-disk version is too old + VDevAuxUnsupFeat // unsupported features + VDevAuxSpared // hot spare used in another pool + VDevAuxErrExceeded // too many errors + VDevAuxIOFailure // experienced I/O failure + VDevAuxBadLog // cannot read log chain(s) + VDevAuxExternal // external diagnosis + VDevAuxSplitPool // vdev was split off into another pool +) diff --git a/zpool.c b/zpool.c index 54eddbf..3488531 100644 --- a/zpool.c +++ b/zpool.c @@ -1,7 +1,7 @@ /* C wrappers around some zfs calls and C in general that should simplify * using libzfs from go language, and make go code shorter and more readable. 
*/ - + #include #include #include @@ -112,17 +112,17 @@ void zprop_source_tostr(char *dst, zprop_source_t source) { break; default: strcpy(dst, "default"); - break; + break; } } int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop) { - + int r = 0; zprop_source_t source; - r = zpool_get_prop(zh, prop, + r = zpool_get_prop(zh, prop, list->value, INT_MAX_VALUE, &source); if (r == 0) { // strcpy(list->name, zpool_prop_to_name(prop)); @@ -366,6 +366,16 @@ add_prop_list(const char *propname, char *propval, nvlist_t **props, return (0); } +int nvlist_lookup_uint64_array_vds(nvlist_t *nv, const char *p, + vdev_stat_t **vds, uint_t *c) { + return nvlist_lookup_uint64_array(nv, p, (uint64_t**)vds, c); +} + +int nvlist_lookup_uint64_array_ps(nvlist_t *nv, const char *p, + pool_scan_stat_t **vds, uint_t *c) { + return nvlist_lookup_uint64_array(nv, p, (uint64_t**)vds, c); +} + nvlist_t** nvlist_alloc_array(int count) { return malloc(count*sizeof(nvlist_t*)); } @@ -381,3 +391,7 @@ void nvlist_free_array(nvlist_t **a) { void free_cstring(char *str) { free(str); } + +nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i) { + return a[i]; +} diff --git a/zpool.go b/zpool.go index c63cc62..dca41da 100644 --- a/zpool.go +++ b/zpool.go @@ -20,12 +20,94 @@ const ( // PoolProperties type is map of pool properties name -> value type PoolProperties map[Prop]string +/* + * ZIO types. Needed to interpret vdev statistics below. 
+ */ +const ( + ZIOTypeNull = iota + ZIOTypeRead + ZIOTypeWrite + ZIOTypeFree + ZIOTypeClaim + ZIOTypeIOCtl + ZIOTypes +) + +// Scan states +const ( + DSSNone = iota // No scan + DSSScanning // Scanning + DSSFinished // Scan finished + DSSCanceled // Scan canceled + DSSNumStates // Total number of scan states +) + +// Scan functions +const ( + PoolScanNone = iota // No scan function + PoolScanScrub // Pools is checked against errors + PoolScanResilver // Pool is resilvering + PoolScanFuncs // Number of scan functions +) + +// VDevStat - Vdev statistics. Note: all fields should be 64-bit because this +// is passed between kernel and userland as an nvlist uint64 array. +type VDevStat struct { + Timestamp time.Duration /* time since vdev load (nanoseconds)*/ + State VDevState /* vdev state */ + Aux VDevAux /* see vdev_aux_t */ + Alloc uint64 /* space allocated */ + Space uint64 /* total capacity */ + DSpace uint64 /* deflated capacity */ + RSize uint64 /* replaceable dev size */ + ESize uint64 /* expandable dev size */ + Ops [ZIOTypes]uint64 /* operation count */ + Bytes [ZIOTypes]uint64 /* bytes read/written */ + ReadErrors uint64 /* read errors */ + WriteErrors uint64 /* write errors */ + ChecksumErrors uint64 /* checksum errors */ + SelfHealed uint64 /* self-healed bytes */ + ScanRemoving uint64 /* removing? */ + ScanProcessed uint64 /* scan processed bytes */ + Fragmentation uint64 /* device fragmentation */ +} + +// PoolScanStat - Pool scan statistics +type PoolScanStat struct { + // Values stored on disk + Func uint64 // Current scan function e.g. none, scrub ... + State uint64 // Current scan state e.g. scanning, finished ... 
+ StartTime uint64 // Scan start time + EndTime uint64 // Scan end time + ToExamine uint64 // Total bytes to scan + Examined uint64 // Total bytes scaned + ToProcess uint64 // Total bytes to processed + Processed uint64 // Total bytes processed + Errors uint64 // Scan errors + // Values not stored on disk + PassExam uint64 // Examined bytes per scan pass + PassStart uint64 // Start time of scan pass +} + // VDevTree ZFS virtual device tree type VDevTree struct { - Type VDevType - Devices []VDevTree // groups other devices (e.g. mirror) - Parity uint - Path string + Type VDevType + Devices []VDevTree // groups other devices (e.g. mirror) + Parity uint + Path string + Name string + Stat VDevStat + ScanStat PoolScanStat +} + +// ExportedPool is type representing ZFS pool available for import +type ExportedPool struct { + VDevs VDevTree + Name string + Comment string + GUID uint64 + State PoolState + Status PoolStatus } // Pool object represents handler to single ZFS pool @@ -56,6 +138,164 @@ func PoolOpen(name string) (pool Pool, err error) { return } +func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { + var dtype *C.char + var c, children C.uint_t + var notpresent C.uint64_t + var vs *C.vdev_stat_t + var ps *C.pool_scan_stat_t + var child **C.nvlist_t + var vdev VDevTree + if 0 != C.nvlist_lookup_string(nv, C.CString(C.ZPOOL_CONFIG_TYPE), &dtype) { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_TYPE) + return + } + vdevs.Name = name + vdevs.Type = VDevType(C.GoString(dtype)) + if vdevs.Type == VDevTypeMissing || vdevs.Type == VDevTypeHole { + return + } + + // Fetch vdev state + if 0 != C.nvlist_lookup_uint64_array_vds(nv, C.CString(C.ZPOOL_CONFIG_VDEV_STATS), + &vs, &c) { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_STATS) + return + } + vdevs.Stat.Timestamp = time.Duration(vs.vs_timestamp) + vdevs.Stat.State = VDevState(vs.vs_state) + vdevs.Stat.Aux = VDevAux(vs.vs_aux) + vdevs.Stat.Alloc = uint64(vs.vs_alloc) + 
vdevs.Stat.Space = uint64(vs.vs_space) + vdevs.Stat.DSpace = uint64(vs.vs_dspace) + vdevs.Stat.RSize = uint64(vs.vs_rsize) + vdevs.Stat.ESize = uint64(vs.vs_esize) + for z := 0; z < ZIOTypes; z++ { + vdev.Stat.Ops[z] = uint64(vs.vs_ops[z]) + vdev.Stat.Bytes[z] = uint64(vs.vs_bytes[z]) + } + vdevs.Stat.ReadErrors = uint64(vs.vs_read_errors) + vdevs.Stat.WriteErrors = uint64(vs.vs_write_errors) + vdevs.Stat.ChecksumErrors = uint64(vs.vs_checksum_errors) + vdevs.Stat.SelfHealed = uint64(vs.vs_self_healed) + vdevs.Stat.ScanRemoving = uint64(vs.vs_scan_removing) + vdevs.Stat.ScanProcessed = uint64(vs.vs_scan_processed) + vdevs.Stat.Fragmentation = uint64(vs.vs_fragmentation) + + // Fetch vdev scan stats + if 0 == C.nvlist_lookup_uint64_array_ps(nv, C.CString(C.ZPOOL_CONFIG_SCAN_STATS), + &ps, &c) { + vdevs.ScanStat.Func = uint64(ps.pss_func) + vdevs.ScanStat.State = uint64(ps.pss_state) + vdevs.ScanStat.StartTime = uint64(ps.pss_start_time) + vdevs.ScanStat.EndTime = uint64(ps.pss_end_time) + vdevs.ScanStat.ToExamine = uint64(ps.pss_to_examine) + vdevs.ScanStat.Examined = uint64(ps.pss_examined) + vdevs.ScanStat.ToProcess = uint64(ps.pss_to_process) + vdevs.ScanStat.Processed = uint64(ps.pss_processed) + vdevs.ScanStat.Errors = uint64(ps.pss_errors) + vdevs.ScanStat.PassExam = uint64(ps.pss_pass_exam) + vdevs.ScanStat.PassStart = uint64(ps.pss_pass_start) + } + + // Fetch the children + if C.nvlist_lookup_nvlist_array(nv, C.CString(C.ZPOOL_CONFIG_CHILDREN), + &child, &children) != 0 { + return + } + if children > 0 { + vdevs.Devices = make([]VDevTree, 0, children) + } + if C.nvlist_lookup_uint64(nv, C.CString(C.ZPOOL_CONFIG_NOT_PRESENT), + ¬present) == 0 { + var path *C.char + if 0 != C.nvlist_lookup_string(nv, C.CString(C.ZPOOL_CONFIG_PATH), &path) { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_PATH) + return + } + vdevs.Path = C.GoString(path) + } + for c = 0; c < children; c++ { + var islog = C.uint64_t(C.B_FALSE) + + 
C.nvlist_lookup_uint64(C.nvlist_array_at(child, c), + C.CString(C.ZPOOL_CONFIG_IS_LOG), &islog) + if islog != C.B_FALSE { + continue + } + vname := C.zpool_vdev_name(libzfsHandle, nil, C.nvlist_array_at(child, c), + C.B_TRUE) + vdev, err = poolGetConfig(C.GoString(vname), + C.nvlist_array_at(child, c)) + C.free_cstring(vname) + if err != nil { + return + } + vdevs.Devices = append(vdevs.Devices, vdev) + } + return +} + +// PoolImportSearch - Search pools available to import but not imported. +// Returns array of found pools. +func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { + var config, nvroot *C.nvlist_t + var cname, msgid, comment *C.char + var poolState, guid C.uint64_t + var reason C.zpool_status_t + var errata C.zpool_errata_t + config = nil + var elem *C.nvpair_t + numofp := len(searchpaths) + cpaths := C.alloc_strings(C.int(numofp)) + for i, path := range searchpaths { + C.strings_setat(cpaths, C.int(i), C.CString(path)) + } + + pools := C.zpool_find_import(libzfsHandle, C.int(numofp), cpaths) + defer C.nvlist_free(pools) + elem = C.nvlist_next_nvpair(pools, elem) + epools = make([]ExportedPool, 0, 1) + for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { + ep := ExportedPool{} + if C.nvpair_value_nvlist(elem, &config) != 0 { + err = LastError() + return + } + if C.nvlist_lookup_uint64(config, C.CString(C.ZPOOL_CONFIG_POOL_STATE), + &poolState) != 0 { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_STATE) + return + } + ep.State = PoolState(poolState) + if C.nvlist_lookup_string(config, C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cname) != 0 { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) + return + } + ep.Name = C.GoString(cname) + if C.nvlist_lookup_uint64(config, C.CString(C.ZPOOL_CONFIG_POOL_GUID), &guid) != 0 { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_GUID) + return + } + ep.GUID = uint64(guid) + reason = C.zpool_import_status(config, &msgid, &errata) + ep.Status = 
PoolStatus(reason) + + if C.nvlist_lookup_string(config, C.CString(C.ZPOOL_CONFIG_COMMENT), &comment) == 0 { + ep.Comment = C.GoString(comment) + } + + if C.nvlist_lookup_nvlist(config, C.CString(C.ZPOOL_CONFIG_VDEV_TREE), + &nvroot) != 0 { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) + return + } + ep.VDevs, err = poolGetConfig(ep.Name, nvroot) + epools = append(epools, ep) + } + return +} + func poolSearchImport(q string, searchpaths []string, guid bool) (name string, err error) { var config *C.nvlist_t @@ -583,23 +823,8 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, return } - // It can happen that pool is not immediately available, - // we know we just created it with success so lets wait and retry - // but only in case EZFS_NOENT error - retr := 0 - for pool, err = PoolOpen(name); err != nil && retr < 3; retr++ { - errno := C.libzfs_errno(libzfsHandle) - if errno == ENoent { - time.Sleep(500 * time.Millisecond) - } else { - err = errors.New(err.Error() + " (PoolOpen)") - return - } - pool, err = PoolOpen(name) - } - if err != nil { - err = errors.New(err.Error() + " (PoolOpen)") - } + // Open created pool and return handle + pool, err = PoolOpen(name) return } @@ -654,3 +879,69 @@ func (pool *Pool) ExportForce(log string) (err error) { } return } + +// VDevTree - Fetch pool's current vdev tree configuration, state and stats +func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { + var nvroot *C.nvlist_t + var poolName string + config := C.zpool_get_config(pool.list.zph, nil) + if config == nil { + err = fmt.Errorf("Failed zpool_get_config") + return + } + if C.nvlist_lookup_nvlist(config, C.CString(C.ZPOOL_CONFIG_VDEV_TREE), + &nvroot) != 0 { + err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) + return + } + if poolName, err = pool.Name(); err != nil { + return + } + return poolGetConfig(poolName, nvroot) +} + +func (s PoolState) String() string { + switch s { + case PoolStateActive: + 
return "ACTIVE" + case PoolStateExported: + return "EXPORTED" + case PoolStateDestroyed: + return "DESTROYED" + case PoolStateSpare: + return "SPARE" + case PoolStateL2cache: + return "L2CACHE" + case PoolStateUninitialized: + return "UNINITIALIZED" + case PoolStateUnavail: + return "UNAVAILABLE" + case PoolStatePotentiallyActive: + return "POTENTIALLYACTIVE" + default: + return "UNKNOWN" + } +} + +func (s VDevState) String() string { + switch s { + case VDevStateUnknown: + return "UNINITIALIZED" + case VDevStateClosed: + return "CLOSED" + case VDevStateOffline: + return "OFFLINE" + case VDevStateRemoved: + return "REMOVED" + case VDevStateCantOpen: + return "CANT_OPEN" + case VDevStateFaulted: + return "FAULTED" + case VDevStateDegraded: + return "DEGRADED" + case VDevStateHealthy: + return "ONLINE" + default: + return "UNKNOWN" + } +} diff --git a/zpool.h b/zpool.h index 04698ae..93b6dbe 100644 --- a/zpool.h +++ b/zpool.h @@ -50,9 +50,16 @@ add_prop_list(const char *propname, char *propval, nvlist_t **props, nvlist_t** nvlist_alloc_array(int count); void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item); void nvlist_free_array(nvlist_t **a); +nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i); void free_cstring(char *str); +int nvlist_lookup_uint64_array_vds(nvlist_t *nv, const char *p, + vdev_stat_t **vds, uint_t *c); + +int nvlist_lookup_uint64_array_ps(nvlist_t *nv, const char *p, + pool_scan_stat_t **vds, uint_t *c); + #endif /* SERVERWARE_ZPOOL_H */ diff --git a/zpool_test.go b/zpool_test.go index f3406f5..54d16f1 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -224,6 +224,36 @@ func zpoolTestImportByGUID(t *testing.T) { print("PASS\n\n") } +func printVDevTree(vt zfs.VDevTree, pref string) { + first := pref + vt.Name + fmt.Printf("%-30s | %-10s | %-10s | %s\n", first, vt.Type, + vt.Stat.State.String(), vt.Path) + for _, v := range vt.Devices { + printVDevTree(v, " "+pref) + } +} + +func zpoolTestPoolImportSearch(t *testing.T) { + println("TEST 
PoolImportSearch") + pools, err := zfs.PoolImportSearch([]string{"/tmp"}) + if err != nil { + t.Error(err.Error()) + return + } + for _, p := range pools { + println() + println("---------------------------------------------------------------") + println("pool: ", p.Name) + println("guid: ", p.GUID) + println("state: ", p.State.String()) + fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH") + println("---------------------------------------------------------------") + printVDevTree(p.VDevs, "") + + } + print("PASS\n\n") +} + func zpoolTestPoolProp(t *testing.T) { println("TEST PoolProp on ", TSTPoolName, " ... ") if pool, err := zfs.PoolOpen(TSTPoolName); err == nil { @@ -237,11 +267,19 @@ func zpoolTestPoolProp(t *testing.T) { } // Test fetching property - _, err := pool.GetProperty(zfs.PoolPropHealth) + propHealth, err := pool.GetProperty(zfs.PoolPropHealth) if err != nil { t.Error(err) return } + println("Pool property health: ", propHealth.Value) + + propGUID, err := pool.GetProperty(zfs.PoolPropGUID) + if err != nil { + t.Error(err) + return + } + println("Pool property GUID: ", propGUID.Value) // this test pool should not be bootable prop, err := pool.GetProperty(zfs.PoolPropBootfs) @@ -290,6 +328,26 @@ func zpoolTestPoolStatusAndState(t *testing.T) { print("PASS\n\n") } +func zpoolTestPoolVDevTree(t *testing.T) { + var vdevs zfs.VDevTree + println("TEST pool VDevTree ( ", TSTPoolName, " ) ... 
") + pool, err := zfs.PoolOpen(TSTPoolName) + if err != nil { + t.Error(err.Error()) + return + } + defer pool.Close() + vdevs, err = pool.VDevTree() + if err != nil { + t.Error(err.Error()) + return + } + fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH") + println("---------------------------------------------------------------") + printVDevTree(vdevs, "") + print("PASS\n\n") +} + /* ------------------------------------------------------------------------- */ // EXAMPLES: From 6b7e0db4e7c7d2c8363464b1786ab26a4c0e68ba Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Fri, 15 Jul 2016 19:49:43 -0400 Subject: [PATCH 17/36] Fix bug where VDevTree.Stat wasn't being populated because the wrong struct was being written to. --- zpool.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/zpool.go b/zpool.go index dca41da..75f810e 100644 --- a/zpool.go +++ b/zpool.go @@ -145,7 +145,6 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { var vs *C.vdev_stat_t var ps *C.pool_scan_stat_t var child **C.nvlist_t - var vdev VDevTree if 0 != C.nvlist_lookup_string(nv, C.CString(C.ZPOOL_CONFIG_TYPE), &dtype) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_TYPE) return @@ -171,8 +170,8 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { vdevs.Stat.RSize = uint64(vs.vs_rsize) vdevs.Stat.ESize = uint64(vs.vs_esize) for z := 0; z < ZIOTypes; z++ { - vdev.Stat.Ops[z] = uint64(vs.vs_ops[z]) - vdev.Stat.Bytes[z] = uint64(vs.vs_bytes[z]) + vdevs.Stat.Ops[z] = uint64(vs.vs_ops[z]) + vdevs.Stat.Bytes[z] = uint64(vs.vs_bytes[z]) } vdevs.Stat.ReadErrors = uint64(vs.vs_read_errors) vdevs.Stat.WriteErrors = uint64(vs.vs_write_errors) @@ -225,6 +224,8 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { } vname := C.zpool_vdev_name(libzfsHandle, nil, C.nvlist_array_at(child, c), C.B_TRUE) + + var vdev VDevTree vdev, err = poolGetConfig(C.GoString(vname), 
C.nvlist_array_at(child, c)) C.free_cstring(vname) From cd589deb96817f46c204f91f966ae328739aa390 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Sat, 16 Jul 2016 11:19:19 -0400 Subject: [PATCH 18/36] Add pool.RefreshStats() method, needed for iostat-like functionality. --- zpool.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/zpool.go b/zpool.go index 75f810e..3a3e975 100644 --- a/zpool.go +++ b/zpool.go @@ -443,6 +443,18 @@ func PoolStateToName(state PoolState) (name string) { return } +// Refresh the pool's vdev statistics, e.g. bytes read/written. +func (pool *Pool) RefreshStats() (err error) { + var missing C.boolean_t + if 0 != C.zpool_refresh_stats(pool.list.zph, &missing) { + return errors.New("error refreshing stats") + } + if missing == C.B_TRUE { + return errors.New("pool has gone missing") + } + return nil +} + // ReloadProperties re-read ZFS pool properties and features, refresh // Pool.Properties and Pool.Features map func (pool *Pool) ReloadProperties() (err error) { From fffecd80c2856cb8cc747899cb4d23e024f00111 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Sun, 17 Jul 2016 16:52:14 -0400 Subject: [PATCH 19/36] fix a variety of CGO memory leaks; watch out for C.CString()! some leaks still present, I mostly fixed those that were impacting me directly. 
--- zpool.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ zpool.go | 79 +++++++++++++++++++++++++++----------------------- zpool.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 207 insertions(+), 36 deletions(-) diff --git a/zpool.c b/zpool.c index 3488531..08a908e 100644 --- a/zpool.c +++ b/zpool.c @@ -9,6 +9,80 @@ #include "zpool.h" +char *sZPOOL_CONFIG_VERSION = ZPOOL_CONFIG_VERSION; +char *sZPOOL_CONFIG_POOL_NAME = ZPOOL_CONFIG_POOL_NAME; +char *sZPOOL_CONFIG_POOL_STATE = ZPOOL_CONFIG_POOL_STATE; +char *sZPOOL_CONFIG_POOL_TXG = ZPOOL_CONFIG_POOL_TXG; +char *sZPOOL_CONFIG_POOL_GUID = ZPOOL_CONFIG_POOL_GUID; +char *sZPOOL_CONFIG_CREATE_TXG = ZPOOL_CONFIG_CREATE_TXG; +char *sZPOOL_CONFIG_TOP_GUID = ZPOOL_CONFIG_TOP_GUID; +char *sZPOOL_CONFIG_VDEV_TREE = ZPOOL_CONFIG_VDEV_TREE; +char *sZPOOL_CONFIG_TYPE = ZPOOL_CONFIG_TYPE; +char *sZPOOL_CONFIG_CHILDREN = ZPOOL_CONFIG_CHILDREN; +char *sZPOOL_CONFIG_ID = ZPOOL_CONFIG_ID; +char *sZPOOL_CONFIG_GUID = ZPOOL_CONFIG_GUID; +char *sZPOOL_CONFIG_PATH = ZPOOL_CONFIG_PATH; +char *sZPOOL_CONFIG_DEVID = ZPOOL_CONFIG_DEVID; +char *sZPOOL_CONFIG_METASLAB_ARRAY = ZPOOL_CONFIG_METASLAB_ARRAY; +char *sZPOOL_CONFIG_METASLAB_SHIFT = ZPOOL_CONFIG_METASLAB_SHIFT; +char *sZPOOL_CONFIG_ASHIFT = ZPOOL_CONFIG_ASHIFT; +char *sZPOOL_CONFIG_ASIZE = ZPOOL_CONFIG_ASIZE; +char *sZPOOL_CONFIG_DTL = ZPOOL_CONFIG_DTL; +char *sZPOOL_CONFIG_SCAN_STATS = ZPOOL_CONFIG_SCAN_STATS; +char *sZPOOL_CONFIG_VDEV_STATS = ZPOOL_CONFIG_VDEV_STATS; +char *sZPOOL_CONFIG_WHOLE_DISK = ZPOOL_CONFIG_WHOLE_DISK; +char *sZPOOL_CONFIG_ERRCOUNT = ZPOOL_CONFIG_ERRCOUNT; +char *sZPOOL_CONFIG_NOT_PRESENT = ZPOOL_CONFIG_NOT_PRESENT; +char *sZPOOL_CONFIG_SPARES = ZPOOL_CONFIG_SPARES; +char *sZPOOL_CONFIG_IS_SPARE = ZPOOL_CONFIG_IS_SPARE; +char *sZPOOL_CONFIG_NPARITY = ZPOOL_CONFIG_NPARITY; +char *sZPOOL_CONFIG_HOSTID = ZPOOL_CONFIG_HOSTID; +char *sZPOOL_CONFIG_HOSTNAME = ZPOOL_CONFIG_HOSTNAME; +char *sZPOOL_CONFIG_LOADED_TIME = 
ZPOOL_CONFIG_LOADED_TIME; +char *sZPOOL_CONFIG_UNSPARE = ZPOOL_CONFIG_UNSPARE; +char *sZPOOL_CONFIG_PHYS_PATH = ZPOOL_CONFIG_PHYS_PATH; +char *sZPOOL_CONFIG_IS_LOG = ZPOOL_CONFIG_IS_LOG; +char *sZPOOL_CONFIG_L2CACHE = ZPOOL_CONFIG_L2CACHE; +char *sZPOOL_CONFIG_HOLE_ARRAY = ZPOOL_CONFIG_HOLE_ARRAY; +char *sZPOOL_CONFIG_VDEV_CHILDREN = ZPOOL_CONFIG_VDEV_CHILDREN; +char *sZPOOL_CONFIG_IS_HOLE = ZPOOL_CONFIG_IS_HOLE; +char *sZPOOL_CONFIG_DDT_HISTOGRAM = ZPOOL_CONFIG_DDT_HISTOGRAM; +char *sZPOOL_CONFIG_DDT_OBJ_STATS = ZPOOL_CONFIG_DDT_OBJ_STATS; +char *sZPOOL_CONFIG_DDT_STATS = ZPOOL_CONFIG_DDT_STATS; +char *sZPOOL_CONFIG_SPLIT = ZPOOL_CONFIG_SPLIT; +char *sZPOOL_CONFIG_ORIG_GUID = ZPOOL_CONFIG_ORIG_GUID; +char *sZPOOL_CONFIG_SPLIT_GUID = ZPOOL_CONFIG_SPLIT_GUID; +char *sZPOOL_CONFIG_SPLIT_LIST = ZPOOL_CONFIG_SPLIT_LIST; +char *sZPOOL_CONFIG_REMOVING = ZPOOL_CONFIG_REMOVING; +char *sZPOOL_CONFIG_RESILVER_TXG = ZPOOL_CONFIG_RESILVER_TXG; +char *sZPOOL_CONFIG_COMMENT = ZPOOL_CONFIG_COMMENT; +char *sZPOOL_CONFIG_SUSPENDED = ZPOOL_CONFIG_SUSPENDED; +char *sZPOOL_CONFIG_TIMESTAMP = ZPOOL_CONFIG_TIMESTAMP; +char *sZPOOL_CONFIG_BOOTFS = ZPOOL_CONFIG_BOOTFS; +char *sZPOOL_CONFIG_MISSING_DEVICES = ZPOOL_CONFIG_MISSING_DEVICES; +char *sZPOOL_CONFIG_LOAD_INFO = ZPOOL_CONFIG_LOAD_INFO; +char *sZPOOL_CONFIG_REWIND_INFO = ZPOOL_CONFIG_REWIND_INFO; +char *sZPOOL_CONFIG_UNSUP_FEAT = ZPOOL_CONFIG_UNSUP_FEAT; +char *sZPOOL_CONFIG_ENABLED_FEAT = ZPOOL_CONFIG_ENABLED_FEAT; +char *sZPOOL_CONFIG_CAN_RDONLY = ZPOOL_CONFIG_CAN_RDONLY; +char *sZPOOL_CONFIG_FEATURES_FOR_READ = ZPOOL_CONFIG_FEATURES_FOR_READ; +char *sZPOOL_CONFIG_FEATURE_STATS = ZPOOL_CONFIG_FEATURE_STATS; +char *sZPOOL_CONFIG_ERRATA = ZPOOL_CONFIG_ERRATA; +char *sZPOOL_CONFIG_OFFLINE = ZPOOL_CONFIG_OFFLINE; +char *sZPOOL_CONFIG_FAULTED = ZPOOL_CONFIG_FAULTED; +char *sZPOOL_CONFIG_DEGRADED = ZPOOL_CONFIG_DEGRADED; +char *sZPOOL_CONFIG_REMOVED = ZPOOL_CONFIG_REMOVED; +char *sZPOOL_CONFIG_FRU = ZPOOL_CONFIG_FRU; +char 
*sZPOOL_CONFIG_AUX_STATE = ZPOOL_CONFIG_AUX_STATE; +char *sZPOOL_REWIND_POLICY = ZPOOL_REWIND_POLICY; +char *sZPOOL_REWIND_REQUEST = ZPOOL_REWIND_REQUEST; +char *sZPOOL_REWIND_REQUEST_TXG = ZPOOL_REWIND_REQUEST_TXG; +char *sZPOOL_REWIND_META_THRESH = ZPOOL_REWIND_META_THRESH; +char *sZPOOL_REWIND_DATA_THRESH = ZPOOL_REWIND_DATA_THRESH; +char *sZPOOL_CONFIG_LOAD_TIME = ZPOOL_CONFIG_LOAD_TIME; +char *sZPOOL_CONFIG_LOAD_DATA_ERRORS = ZPOOL_CONFIG_LOAD_DATA_ERRORS; +char *sZPOOL_CONFIG_REWIND_TIME = ZPOOL_CONFIG_REWIND_TIME; + static char _lasterr_[1024]; const char *lasterr(void) { @@ -395,3 +469,16 @@ void free_cstring(char *str) { nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i) { return a[i]; } + +int refresh_stats(zpool_list_t *pool) +{ + boolean_t missing; + int err = zpool_refresh_stats(pool->zph, &missing); + if ( err != 0 ) { + return err; + } + if ( missing == B_TRUE ) { + return -1; + } + return 0; +} diff --git a/zpool.go b/zpool.go index 3a3e975..a8783bd 100644 --- a/zpool.go +++ b/zpool.go @@ -129,7 +129,10 @@ type Pool struct { // Returns Pool object, requires Pool.Close() to be called explicitly // for memory cleanup after object is not needed anymore. 
func PoolOpen(name string) (pool Pool, err error) { - pool.list = C.zpool_list_open(libzfsHandle, C.CString(name)) + namestr := C.CString(name) + pool.list = C.zpool_list_open(libzfsHandle, namestr) + C.free_cstring(namestr) + if pool.list != nil { err = pool.ReloadProperties() return @@ -145,7 +148,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { var vs *C.vdev_stat_t var ps *C.pool_scan_stat_t var child **C.nvlist_t - if 0 != C.nvlist_lookup_string(nv, C.CString(C.ZPOOL_CONFIG_TYPE), &dtype) { + if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_TYPE, &dtype) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_TYPE) return } @@ -156,7 +159,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { } // Fetch vdev state - if 0 != C.nvlist_lookup_uint64_array_vds(nv, C.CString(C.ZPOOL_CONFIG_VDEV_STATS), + if 0 != C.nvlist_lookup_uint64_array_vds(nv, C.sZPOOL_CONFIG_VDEV_STATS, &vs, &c) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_STATS) return @@ -182,7 +185,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { vdevs.Stat.Fragmentation = uint64(vs.vs_fragmentation) // Fetch vdev scan stats - if 0 == C.nvlist_lookup_uint64_array_ps(nv, C.CString(C.ZPOOL_CONFIG_SCAN_STATS), + if 0 == C.nvlist_lookup_uint64_array_ps(nv, C.sZPOOL_CONFIG_SCAN_STATS, &ps, &c) { vdevs.ScanStat.Func = uint64(ps.pss_func) vdevs.ScanStat.State = uint64(ps.pss_state) @@ -198,17 +201,17 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { } // Fetch the children - if C.nvlist_lookup_nvlist_array(nv, C.CString(C.ZPOOL_CONFIG_CHILDREN), + if C.nvlist_lookup_nvlist_array(nv, C.sZPOOL_CONFIG_CHILDREN, &child, &children) != 0 { return } if children > 0 { vdevs.Devices = make([]VDevTree, 0, children) } - if C.nvlist_lookup_uint64(nv, C.CString(C.ZPOOL_CONFIG_NOT_PRESENT), + if C.nvlist_lookup_uint64(nv, C.sZPOOL_CONFIG_NOT_PRESENT, ¬present) == 0 { var path *C.char 
- if 0 != C.nvlist_lookup_string(nv, C.CString(C.ZPOOL_CONFIG_PATH), &path) { + if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_PATH, &path) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_PATH) return } @@ -218,7 +221,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { var islog = C.uint64_t(C.B_FALSE) C.nvlist_lookup_uint64(C.nvlist_array_at(child, c), - C.CString(C.ZPOOL_CONFIG_IS_LOG), &islog) + C.sZPOOL_CONFIG_IS_LOG, &islog) if islog != C.B_FALSE { continue } @@ -263,18 +266,18 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { err = LastError() return } - if C.nvlist_lookup_uint64(config, C.CString(C.ZPOOL_CONFIG_POOL_STATE), + if C.nvlist_lookup_uint64(config, C.sZPOOL_CONFIG_POOL_STATE, &poolState) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_STATE) return } ep.State = PoolState(poolState) - if C.nvlist_lookup_string(config, C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cname) != 0 { + if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_POOL_NAME, &cname) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) return } ep.Name = C.GoString(cname) - if C.nvlist_lookup_uint64(config, C.CString(C.ZPOOL_CONFIG_POOL_GUID), &guid) != 0 { + if C.nvlist_lookup_uint64(config, C.sZPOOL_CONFIG_POOL_GUID, &guid) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_GUID) return } @@ -282,11 +285,11 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { reason = C.zpool_import_status(config, &msgid, &errata) ep.Status = PoolStatus(reason) - if C.nvlist_lookup_string(config, C.CString(C.ZPOOL_CONFIG_COMMENT), &comment) == 0 { + if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_COMMENT, &comment) == 0 { ep.Comment = C.GoString(comment) } - if C.nvlist_lookup_nvlist(config, C.CString(C.ZPOOL_CONFIG_VDEV_TREE), + if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0 { err = fmt.Errorf("Failed to fetch %s", 
C.ZPOOL_CONFIG_VDEV_TREE) return @@ -325,7 +328,7 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, if guid { var iguid C.uint64_t if retcode = C.nvlist_lookup_uint64(tconfig, - C.CString(C.ZPOOL_CONFIG_POOL_GUID), &iguid); retcode != 0 { + C.sZPOOL_CONFIG_POOL_GUID, &iguid); retcode != 0 { err = errPoolList return } @@ -336,7 +339,7 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, } } else { if retcode = C.nvlist_lookup_string(tconfig, - C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cq); retcode != 0 { + C.sZPOOL_CONFIG_POOL_NAME, &cq); retcode != 0 { err = errPoolList return } @@ -355,7 +358,7 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, if guid { // We need to get name so we can open pool by name if retcode := C.nvlist_lookup_string(config, - C.CString(C.ZPOOL_CONFIG_POOL_NAME), &cname); retcode != 0 { + C.sZPOOL_CONFIG_POOL_NAME, &cname); retcode != 0 { err = errPoolList return } @@ -445,13 +448,9 @@ func PoolStateToName(state PoolState) (name string) { // Refresh the pool's vdev statistics, e.g. bytes read/written. func (pool *Pool) RefreshStats() (err error) { - var missing C.boolean_t - if 0 != C.zpool_refresh_stats(pool.list.zph, &missing) { + if 0 != C.refresh_stats(pool.list) { return errors.New("error refreshing stats") } - if missing == C.B_TRUE { - return errors.New("pool has gone missing") - } return nil } @@ -486,7 +485,10 @@ func (pool *Pool) ReloadProperties() (err error) { "filesystem_limits": "disabled", "large_blocks": "disabled"} for name := range pool.Features { - pool.GetFeature(name) + _, ferr := pool.GetFeature(name) + if ferr != nil { + // tolerate it + } } return } @@ -518,8 +520,11 @@ func (pool *Pool) GetProperty(p Prop) (prop Property, err error) { // feature in Features map. 
func (pool *Pool) GetFeature(name string) (value string, err error) { var fvalue [512]C.char - sname := fmt.Sprint("feature@", name) - r := C.zpool_prop_get_feature(pool.list.zph, C.CString(sname), &(fvalue[0]), 512) + var sname *C.char + sname = C.CString(fmt.Sprint("feature@", name)) + r := C.zpool_prop_get_feature(pool.list.zph, sname, &(fvalue[0]), 512) + C.free_cstring(sname) + if r != 0 { err = errors.New(fmt.Sprint("Unknown zpool feature: ", name)) return @@ -686,12 +691,12 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, vdev.Type, mindevs, maxdevs) return } - if r := C.nvlist_add_string(child, C.CString(C.ZPOOL_CONFIG_TYPE), + if r := C.nvlist_add_string(child, C.sZPOOL_CONFIG_TYPE, C.CString(string(vdev.Type))); r != 0 { err = errors.New("Failed to set vdev type") return } - if r := C.nvlist_add_uint64(child, C.CString(C.ZPOOL_CONFIG_IS_LOG), + if r := C.nvlist_add_uint64(child, C.sZPOOL_CONFIG_IS_LOG, vdev.isLog()); r != 0 { err = errors.New("Failed to allocate vdev (is_log)") return @@ -699,7 +704,7 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, if grouping { if vdev.Type == VDevTypeRaidz { r := C.nvlist_add_uint64(child, - C.CString(C.ZPOOL_CONFIG_NPARITY), + C.sZPOOL_CONFIG_NPARITY, C.uint64_t(mindevs-1)) if r != 0 { err = errors.New("Failed to allocate vdev (parity)") @@ -713,14 +718,14 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, } else { // if vdev.Type == VDevTypeDisk { if r := C.nvlist_add_uint64(child, - C.CString(C.ZPOOL_CONFIG_WHOLE_DISK), 1); r != 0 { + C.sZPOOL_CONFIG_WHOLE_DISK, 1); r != 0 { err = errors.New("Failed to allocate vdev child (whdisk)") return } // } if len(vdev.Path) > 0 { if r := C.nvlist_add_string( - child, C.CString(C.ZPOOL_CONFIG_PATH), + child, C.sZPOOL_CONFIG_PATH, C.CString(vdev.Path)); r != 0 { err = errors.New("Failed to allocate vdev child (type)") return @@ -728,7 +733,7 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs 
[]VDevTree, ashift, _ := strconv.Atoi(props[PoolPropAshift]) if ashift > 0 { if r := C.nvlist_add_uint64(child, - C.CString(C.ZPOOL_CONFIG_ASHIFT), + C.sZPOOL_CONFIG_ASHIFT, C.uint64_t(ashift)); r != 0 { err = errors.New("Failed to allocate vdev child (ashift)") return @@ -751,7 +756,7 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, } if count > 0 { if r := C.nvlist_add_nvlist_array(root, - C.CString(C.ZPOOL_CONFIG_CHILDREN), childrens, + C.sZPOOL_CONFIG_CHILDREN, childrens, C.uint_t(count)); r != 0 { err = errors.New("Failed to allocate vdev children") return @@ -761,7 +766,7 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, } if nl2cache > 0 { if r := C.nvlist_add_nvlist_array(root, - C.CString(C.ZPOOL_CONFIG_L2CACHE), l2cache, + C.sZPOOL_CONFIG_L2CACHE, l2cache, C.uint_t(nl2cache)); r != 0 { err = errors.New("Failed to allocate vdev cache") return @@ -769,7 +774,7 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, } if nspares > 0 { if r := C.nvlist_add_nvlist_array(root, - C.CString(C.ZPOOL_CONFIG_SPARES), spares, + C.sZPOOL_CONFIG_SPARES, spares, C.uint_t(nspares)); r != 0 { err = errors.New("Failed to allocate vdev spare") return @@ -788,7 +793,7 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, err = errors.New("Failed to allocate root vdev") return } - if r := C.nvlist_add_string(nvroot, C.CString(C.ZPOOL_CONFIG_TYPE), + if r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE, C.CString(string(VDevTypeRoot))); r != 0 { err = errors.New("Failed to allocate root vdev") return @@ -887,9 +892,11 @@ func (pool *Pool) Export(force bool, log string) (err error) { // ExportForce hard force export of the pool from the system. 
func (pool *Pool) ExportForce(log string) (err error) { - if rc := C.zpool_export_force(pool.list.zph, C.CString(log)); rc != 0 { + logstr := C.CString(log) + if rc := C.zpool_export_force(pool.list.zph, logstr); rc != 0 { err = LastError() } + C.free_cstring(logstr) return } @@ -902,7 +909,7 @@ func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { err = fmt.Errorf("Failed zpool_get_config") return } - if C.nvlist_lookup_nvlist(config, C.CString(C.ZPOOL_CONFIG_VDEV_TREE), + if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) return diff --git a/zpool.h b/zpool.h index 93b6dbe..b36ce3d 100644 --- a/zpool.h +++ b/zpool.h @@ -61,5 +61,82 @@ int nvlist_lookup_uint64_array_vds(nvlist_t *nv, const char *p, int nvlist_lookup_uint64_array_ps(nvlist_t *nv, const char *p, pool_scan_stat_t **vds, uint_t *c); +int refresh_stats(zpool_list_t *pool); + +char *sZPOOL_CONFIG_VERSION; +char *sZPOOL_CONFIG_POOL_NAME; +char *sZPOOL_CONFIG_POOL_STATE; +char *sZPOOL_CONFIG_POOL_TXG; +char *sZPOOL_CONFIG_POOL_GUID; +char *sZPOOL_CONFIG_CREATE_TXG; +char *sZPOOL_CONFIG_TOP_GUID; +char *sZPOOL_CONFIG_VDEV_TREE; +char *sZPOOL_CONFIG_TYPE; +char *sZPOOL_CONFIG_CHILDREN; +char *sZPOOL_CONFIG_ID; +char *sZPOOL_CONFIG_GUID; +char *sZPOOL_CONFIG_PATH; +char *sZPOOL_CONFIG_DEVID; +char *sZPOOL_CONFIG_METASLAB_ARRAY; +char *sZPOOL_CONFIG_METASLAB_SHIFT; +char *sZPOOL_CONFIG_ASHIFT; +char *sZPOOL_CONFIG_ASIZE; +char *sZPOOL_CONFIG_DTL; +char *sZPOOL_CONFIG_SCAN_STATS; +char *sZPOOL_CONFIG_VDEV_STATS; +char *sZPOOL_CONFIG_WHOLE_DISK; +char *sZPOOL_CONFIG_ERRCOUNT; +char *sZPOOL_CONFIG_NOT_PRESENT; +char *sZPOOL_CONFIG_SPARES; +char *sZPOOL_CONFIG_IS_SPARE; +char *sZPOOL_CONFIG_NPARITY; +char *sZPOOL_CONFIG_HOSTID; +char *sZPOOL_CONFIG_HOSTNAME; +char *sZPOOL_CONFIG_LOADED_TIME; +char *sZPOOL_CONFIG_UNSPARE; +char *sZPOOL_CONFIG_PHYS_PATH; +char *sZPOOL_CONFIG_IS_LOG; +char *sZPOOL_CONFIG_L2CACHE; +char 
*sZPOOL_CONFIG_HOLE_ARRAY; +char *sZPOOL_CONFIG_VDEV_CHILDREN; +char *sZPOOL_CONFIG_IS_HOLE; +char *sZPOOL_CONFIG_DDT_HISTOGRAM; +char *sZPOOL_CONFIG_DDT_OBJ_STATS; +char *sZPOOL_CONFIG_DDT_STATS; +char *sZPOOL_CONFIG_SPLIT; +char *sZPOOL_CONFIG_ORIG_GUID; +char *sZPOOL_CONFIG_SPLIT_GUID; +char *sZPOOL_CONFIG_SPLIT_LIST; +char *sZPOOL_CONFIG_REMOVING; +char *sZPOOL_CONFIG_RESILVER_TXG; +char *sZPOOL_CONFIG_COMMENT; +char *sZPOOL_CONFIG_SUSPENDED; +char *sZPOOL_CONFIG_TIMESTAMP; +char *sZPOOL_CONFIG_BOOTFS; +char *sZPOOL_CONFIG_MISSING_DEVICES; +char *sZPOOL_CONFIG_LOAD_INFO; +char *sZPOOL_CONFIG_REWIND_INFO; +char *sZPOOL_CONFIG_UNSUP_FEAT; +char *sZPOOL_CONFIG_ENABLED_FEAT; +char *sZPOOL_CONFIG_CAN_RDONLY; +char *sZPOOL_CONFIG_FEATURES_FOR_READ; +char *sZPOOL_CONFIG_FEATURE_STATS; +char *sZPOOL_CONFIG_ERRATA; +char *sZPOOL_CONFIG_OFFLINE; +char *sZPOOL_CONFIG_FAULTED; +char *sZPOOL_CONFIG_DEGRADED; +char *sZPOOL_CONFIG_REMOVED; +char *sZPOOL_CONFIG_FRU; +char *sZPOOL_CONFIG_AUX_STATE; +char *sZPOOL_REWIND_POLICY; +char *sZPOOL_REWIND_REQUEST; +char *sZPOOL_REWIND_REQUEST_TXG; +char *sZPOOL_REWIND_META_THRESH; +char *sZPOOL_REWIND_DATA_THRESH; +char *sZPOOL_CONFIG_LOAD_TIME; +char *sZPOOL_CONFIG_LOAD_DATA_ERRORS; +char *sZPOOL_CONFIG_REWIND_TIME; + + #endif /* SERVERWARE_ZPOOL_H */ From a6fba76e816fd3c7f316e6ed22cc86395acf1eee Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Tue, 26 Jul 2016 21:14:28 +0200 Subject: [PATCH 20/36] - Fix remaining cgo CString memory leaks --- zfs.c | 2 +- zfs.go | 40 ++++++++++++++++++------ zfs.h | 2 +- zpool.c | 4 --- zpool.go | 94 +++++++++++++++++++++++++++++++++++++------------------- zpool.h | 3 -- 6 files changed, 94 insertions(+), 51 deletions(-) diff --git a/zfs.c b/zfs.c index 46b7721..047ba48 100644 --- a/zfs.c +++ b/zfs.c @@ -87,7 +87,7 @@ int clear_last_error(libzfs_handle_t *hdl) { return 0; } -char** alloc_strings(int size) { +char** alloc_cstrings(int size) { return malloc(size*sizeof(char*)); } diff --git a/zfs.go 
b/zfs.go index 288fab7..82dc968 100644 --- a/zfs.go +++ b/zfs.go @@ -8,6 +8,7 @@ import "C" import ( "errors" + "unsafe" ) const ( @@ -104,7 +105,9 @@ func DatasetCloseAll(datasets []Dataset) { // DatasetOpen open dataset and all of its recursive children datasets func DatasetOpen(path string) (d Dataset, err error) { d.list = C.create_dataset_list_item() - d.list.zh = C.zfs_open(libzfsHandle, C.CString(path), 0xF) + csPath := C.CString(path) + d.list.zh = C.zfs_open(libzfsHandle, csPath, 0xF) + C.free(unsafe.Pointer(csPath)) if d.list.zh == nil { err = LastError() @@ -129,9 +132,11 @@ func datasetPropertiesTonvlist(props map[Prop]Property) ( return } for prop, value := range props { + csValue := C.CString(value.Value) r := C.nvlist_add_string( cprops, C.zfs_prop_to_name( - C.zfs_prop_t(prop)), C.CString(value.Value)) + C.zfs_prop_t(prop)), csValue) + C.free(unsafe.Pointer(csValue)) if r != 0 { err = errors.New("Failed to convert property") return @@ -150,8 +155,10 @@ func DatasetCreate(path string, dtype DatasetType, } defer C.nvlist_free(cprops) - errcode := C.zfs_create(libzfsHandle, C.CString(path), + csPath := C.CString(path) + errcode := C.zfs_create(libzfsHandle, csPath, C.zfs_type_t(dtype), cprops) + C.free(unsafe.Pointer(csPath)) if errcode != 0 { err = LastError() } @@ -179,6 +186,9 @@ func (d *Dataset) Destroy(Defer bool) (err error) { return } dsType, e := d.GetProperty(DatasetPropType) + if e != nil { + dsType.Value = err.Error() // just put error (why it didn't fetch property type) + } err = errors.New("Cannot destroy dataset " + path + ": " + dsType.Value + " has children") return @@ -276,8 +286,10 @@ func (d *Dataset) SetProperty(p Prop, value string) (err error) { err = errors.New(msgDatasetIsNil) return } + csValue := C.CString(value) errcode := C.zfs_prop_set(d.list.zh, C.zfs_prop_to_name( - C.zfs_prop_t(p)), C.CString(value)) + C.zfs_prop_t(p)), csValue) + C.free(unsafe.Pointer(csValue)) if errcode != 0 { err = LastError() } @@ -300,7 +312,9 @@ 
func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err return } defer C.nvlist_free(cprops) - if errc := C.zfs_clone(d.list.zh, C.CString(target), cprops); errc != 0 { + csTarget := C.CString(target) + defer C.free(unsafe.Pointer(csTarget)) + if errc := C.zfs_clone(d.list.zh, csTarget, cprops); errc != 0 { err = LastError() return } @@ -315,7 +329,9 @@ func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Datas return } defer C.nvlist_free(cprops) - if errc := C.zfs_snapshot(libzfsHandle, C.CString(path), booleanT(recur), cprops); errc != 0 { + csPath := C.CString(path) + defer C.free(unsafe.Pointer(csPath)) + if errc := C.zfs_snapshot(libzfsHandle, csPath, booleanT(recur), cprops); errc != 0 { err = LastError() return } @@ -348,13 +364,15 @@ func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { } // Rename dataset -func (d *Dataset) Rename(newname string, recur, +func (d *Dataset) Rename(newName string, recur, forceUnmount bool) (err error) { if d.list == nil { err = errors.New(msgDatasetIsNil) return } - if errc := C.zfs_rename(d.list.zh, C.CString(newname), + csNewName := C.CString(newName) + defer C.free(unsafe.Pointer(csNewName)) + if errc := C.zfs_rename(d.list.zh, csNewName, booleanT(recur), booleanT(forceUnmount)); errc != 0 { err = LastError() } @@ -370,7 +388,7 @@ func (d *Dataset) IsMounted() (mounted bool, where string) { return false, "" } m := C.zfs_is_mounted(d.list.zh, &cw) - defer C.free_cstring(cw) + defer C.free(unsafe.Pointer(cw)) if m != 0 { return true, C.GoString(cw) } @@ -383,7 +401,9 @@ func (d *Dataset) Mount(options string, flags int) (err error) { err = errors.New(msgDatasetIsNil) return } - if ec := C.zfs_mount(d.list.zh, C.CString(options), C.int(flags)); ec != 0 { + csOptions := C.CString(options) + defer C.free(unsafe.Pointer(csOptions)) + if ec := C.zfs_mount(d.list.zh, csOptions, C.int(flags)); ec != 0 { err = LastError() } return diff --git a/zfs.h b/zfs.h index 
df9403a..40a4cd5 100644 --- a/zfs.h +++ b/zfs.h @@ -24,7 +24,7 @@ int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop); int clear_last_error(libzfs_handle_t *libzfs); -char** alloc_strings(int size); +char** alloc_cstrings(int size); void strings_setat(char **a, int at, char *v); #endif diff --git a/zpool.c b/zpool.c index 08a908e..5755312 100644 --- a/zpool.c +++ b/zpool.c @@ -462,10 +462,6 @@ void nvlist_free_array(nvlist_t **a) { free(a); } -void free_cstring(char *str) { - free(str); -} - nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i) { return a[i]; } diff --git a/zpool.go b/zpool.go index a8783bd..55e221c 100644 --- a/zpool.go +++ b/zpool.go @@ -11,6 +11,7 @@ import ( "fmt" "strconv" "time" + "unsafe" ) const ( @@ -129,9 +130,9 @@ type Pool struct { // Returns Pool object, requires Pool.Close() to be called explicitly // for memory cleanup after object is not needed anymore. func PoolOpen(name string) (pool Pool, err error) { - namestr := C.CString(name) - pool.list = C.zpool_list_open(libzfsHandle, namestr) - C.free_cstring(namestr) + csName := C.CString(name) + defer C.free(unsafe.Pointer(csName)) + pool.list = C.zpool_list_open(libzfsHandle, csName) if pool.list != nil { err = pool.ReloadProperties() @@ -227,11 +228,10 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { } vname := C.zpool_vdev_name(libzfsHandle, nil, C.nvlist_array_at(child, c), C.B_TRUE) - var vdev VDevTree vdev, err = poolGetConfig(C.GoString(vname), C.nvlist_array_at(child, c)) - C.free_cstring(vname) + C.free(unsafe.Pointer(vname)) if err != nil { return } @@ -251,9 +251,12 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { config = nil var elem *C.nvpair_t numofp := len(searchpaths) - cpaths := C.alloc_strings(C.int(numofp)) + cpaths := C.alloc_cstrings(C.int(numofp)) + defer C.free(unsafe.Pointer(cpaths)) for i, path := range searchpaths { - C.strings_setat(cpaths, C.int(i), C.CString(path)) + 
csPath := C.CString(path) + defer C.free(unsafe.Pointer(csPath)) + C.strings_setat(cpaths, C.int(i), csPath) } pools := C.zpool_find_import(libzfsHandle, C.int(numofp), cpaths) @@ -308,9 +311,12 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, errPoolList := errors.New("Failed to list pools") var elem *C.nvpair_t numofp := len(searchpaths) - cpaths := C.alloc_strings(C.int(numofp)) + cpaths := C.alloc_cstrings(C.int(numofp)) + defer C.free(unsafe.Pointer(cpaths)) for i, path := range searchpaths { - C.strings_setat(cpaths, C.int(i), C.CString(path)) + csPath := C.CString(path) + defer C.free(unsafe.Pointer(csPath)) + C.strings_setat(cpaths, C.int(i), csPath) } pools := C.zpool_find_import(libzfsHandle, C.int(numofp), cpaths) @@ -446,7 +452,7 @@ func PoolStateToName(state PoolState) (name string) { return } -// Refresh the pool's vdev statistics, e.g. bytes read/written. +// RefreshStats the pool's vdev statistics, e.g. bytes read/written. func (pool *Pool) RefreshStats() (err error) { if 0 != C.refresh_stats(pool.list) { return errors.New("error refreshing stats") @@ -520,11 +526,9 @@ func (pool *Pool) GetProperty(p Prop) (prop Property, err error) { // feature in Features map. 
func (pool *Pool) GetFeature(name string) (value string, err error) { var fvalue [512]C.char - var sname *C.char - sname = C.CString(fmt.Sprint("feature@", name)) - r := C.zpool_prop_get_feature(pool.list.zph, sname, &(fvalue[0]), 512) - C.free_cstring(sname) - + csName := C.CString(fmt.Sprint("feature@", name)) + r := C.zpool_prop_get_feature(pool.list.zph, csName, &(fvalue[0]), 512) + C.free(unsafe.Pointer(csName)) if r != 0 { err = errors.New(fmt.Sprint("Unknown zpool feature: ", name)) return @@ -545,7 +549,11 @@ func (pool *Pool) SetProperty(p Prop, value string) (err error) { PoolPropertyToName(p))) return } - r := C.zpool_set_prop(pool.list.zph, C.CString(PoolPropertyToName(p)), C.CString(value)) + csPropName := C.CString(PoolPropertyToName(p)) + csPropValue := C.CString(value) + r := C.zpool_set_prop(pool.list.zph, csPropName, csPropValue) + C.free(unsafe.Pointer(csPropName)) + C.free(unsafe.Pointer(csPropValue)) if r != 0 { err = LastError() } else { @@ -622,7 +630,9 @@ func toCPoolProperties(props PoolProperties) (cprops *C.nvlist_t) { cprops = nil for prop, value := range props { name := C.zpool_prop_to_name(C.zpool_prop_t(prop)) - r := C.add_prop_list(name, C.CString(value), &cprops, C.boolean_t(1)) + csPropValue := C.CString(value) + r := C.add_prop_list(name, csPropValue, &cprops, C.boolean_t(1)) + C.free(unsafe.Pointer(csPropValue)) if r != 0 { if cprops != nil { C.nvlist_free(cprops) @@ -638,7 +648,9 @@ func toCDatasetProperties(props DatasetProperties) (cprops *C.nvlist_t) { cprops = nil for prop, value := range props { name := C.zfs_prop_to_name(C.zfs_prop_t(prop)) - r := C.add_prop_list(name, C.CString(value), &cprops, C.boolean_t(0)) + csPropValue := C.CString(value) + r := C.add_prop_list(name, csPropValue, &cprops, C.boolean_t(0)) + C.free(unsafe.Pointer(csPropValue)) if r != 0 { if cprops != nil { C.nvlist_free(cprops) @@ -691,8 +703,11 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, vdev.Type, mindevs, maxdevs) 
return } - if r := C.nvlist_add_string(child, C.sZPOOL_CONFIG_TYPE, - C.CString(string(vdev.Type))); r != 0 { + csType := C.CString(string(vdev.Type)) + r := C.nvlist_add_string(child, C.sZPOOL_CONFIG_TYPE, + csType) + C.free(unsafe.Pointer(csType)) + if r != 0 { err = errors.New("Failed to set vdev type") return } @@ -724,9 +739,12 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, } // } if len(vdev.Path) > 0 { - if r := C.nvlist_add_string( + csPath := C.CString(vdev.Path) + r := C.nvlist_add_string( child, C.sZPOOL_CONFIG_PATH, - C.CString(vdev.Path)); r != 0 { + csPath) + C.free(unsafe.Pointer(csPath)) + if r != 0 { err = errors.New("Failed to allocate vdev child (type)") return } @@ -793,8 +811,11 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, err = errors.New("Failed to allocate root vdev") return } - if r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE, - C.CString(string(VDevTypeRoot))); r != 0 { + csTypeRoot := C.CString(string(VDevTypeRoot)) + r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE, + csTypeRoot) + C.free(unsafe.Pointer(csTypeRoot)) + if r != 0 { err = errors.New("Failed to allocate root vdev") return } @@ -821,9 +842,12 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, return } for fname, fval := range features { - sfname := fmt.Sprintf("feature@%s", fname) - r := C.add_prop_list(C.CString(sfname), C.CString(fval), &cprops, + csName := C.CString(fmt.Sprintf("feature@%s", fname)) + csVal := C.CString(fval) + r := C.add_prop_list(csName, csVal, &cprops, C.boolean_t(1)) + C.free(unsafe.Pointer(csName)) + C.free(unsafe.Pointer(csVal)) if r != 0 { if cprops != nil { C.nvlist_free(cprops) @@ -834,7 +858,9 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, } // Create actual pool then open - if r := C.zpool_create(libzfsHandle, C.CString(name), nvroot, + csName := C.CString(name) + defer C.free(unsafe.Pointer(csName)) + if r := 
C.zpool_create(libzfsHandle, csName, nvroot, cprops, cfsprops); r != 0 { err = LastError() err = errors.New(err.Error() + " (zpool_create)") @@ -868,7 +894,9 @@ func (pool *Pool) Destroy(logStr string) (err error) { err = errors.New(msgPoolIsNil) return } - retcode := C.zpool_destroy(pool.list.zph, C.CString(logStr)) + csLog := C.CString(logStr) + defer C.free(unsafe.Pointer(csLog)) + retcode := C.zpool_destroy(pool.list.zph, csLog) if retcode != 0 { err = LastError() } @@ -884,7 +912,9 @@ func (pool *Pool) Export(force bool, log string) (err error) { if force { forcet = 1 } - if rc := C.zpool_export(pool.list.zph, forcet, C.CString(log)); rc != 0 { + csLog := C.CString(log) + defer C.free(unsafe.Pointer(csLog)) + if rc := C.zpool_export(pool.list.zph, forcet, csLog); rc != 0 { err = LastError() } return @@ -892,11 +922,11 @@ func (pool *Pool) Export(force bool, log string) (err error) { // ExportForce hard force export of the pool from the system. func (pool *Pool) ExportForce(log string) (err error) { - logstr := C.CString(log) - if rc := C.zpool_export_force(pool.list.zph, logstr); rc != 0 { + csLog := C.CString(log) + defer C.free(unsafe.Pointer(csLog)) + if rc := C.zpool_export_force(pool.list.zph, csLog); rc != 0 { err = LastError() } - C.free_cstring(logstr) return } diff --git a/zpool.h b/zpool.h index b36ce3d..c45e5b7 100644 --- a/zpool.h +++ b/zpool.h @@ -52,9 +52,6 @@ void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item); void nvlist_free_array(nvlist_t **a); nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i); - -void free_cstring(char *str); - int nvlist_lookup_uint64_array_vds(nvlist_t *nv, const char *p, vdev_stat_t **vds, uint_t *c); From 5411c35d4b15ce99d0887e659ad51381de044bb7 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 16 Jan 2017 14:05:04 +0100 Subject: [PATCH 21/36] - Constants for feature enabled/disabled --- zpool.go | 16 ++++++++++++++++ zpool_test.go | 6 +++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git 
a/zpool.go b/zpool.go index 55e221c..df9b203 100644 --- a/zpool.go +++ b/zpool.go @@ -18,6 +18,12 @@ const ( msgPoolIsNil = "Pool handle not initialized or its closed" ) +// Enable or disable pool feature with this constants +const ( + FENABLED = "enabled" + FDISABLED = "disabled" +) + // PoolProperties type is map of pool properties name -> value type PoolProperties map[Prop]string @@ -826,6 +832,16 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, return } + // Enable 0.6.5 features per default + features["spacemap_histogram"] = FENABLED + features["enabled_txg"] = FENABLED + features["hole_birth"] = FENABLED + features["extensible_dataset"] = FENABLED + features["embedded_data"] = FENABLED + features["bookmarks"] = FENABLED + features["filesystem_limits"] = FENABLED + features["large_blocks"] = FENABLED + // convert properties cprops := toCPoolProperties(props) if cprops != nil { diff --git a/zpool_test.go b/zpool_test.go index 54d16f1..c55382c 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -100,9 +100,9 @@ func zpoolTestPoolCreate(t *testing.T) { fsprops := make(map[zfs.Prop]string) features := make(map[string]string) fsprops[zfs.DatasetPropMountpoint] = "none" - features["async_destroy"] = "enabled" - features["empty_bpobj"] = "enabled" - features["lz4_compress"] = "enabled" + features["async_destroy"] = zfs.FENABLED + features["empty_bpobj"] = zfs.FENABLED + features["lz4_compress"] = zfs.FENABLED pool, err := zfs.PoolCreate(TSTPoolName, vdevs, features, props, fsprops) if err != nil { From fe36016d7edcf6e2b4567f54ecfaa7ac5cb922c5 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 16 Jan 2017 14:07:17 +0100 Subject: [PATCH 22/36] - Set and get user property --- zfs.c | 41 +++++++++++++++++++++++++++++++++++++++++ zfs.go | 36 ++++++++++++++++++++++++++++++++++++ zfs.h | 1 + zfs_test.go | 17 ++++++++++++++++- zpool.h | 4 +++- 5 files changed, 97 insertions(+), 2 deletions(-) diff --git a/zfs.c b/zfs.c index 047ba48..f079ce6 
100644 --- a/zfs.c +++ b/zfs.c @@ -82,6 +82,47 @@ int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) { return r; } +int read_user_property(zfs_handle_t *zh, property_list_t *list, const char *prop) { + nvlist_t *user_props = zfs_get_user_props(zh); + nvlist_t *propval; + zprop_source_t sourcetype; + char *strval; + char *sourceval; + // char source[ZFS_MAX_DATASET_NAME_LEN]; + + if (nvlist_lookup_nvlist(user_props, + prop, &propval) != 0) { + sourcetype = ZPROP_SRC_NONE; + (void) strncpy(list->source, + "none", sizeof (list->source)); + strval = "-"; + } else { + verify(nvlist_lookup_string(propval, + ZPROP_VALUE, &strval) == 0); + verify(nvlist_lookup_string(propval, + ZPROP_SOURCE, &sourceval) == 0); + + if (strcmp(sourceval, + zfs_get_name(zh)) == 0) { + sourcetype = ZPROP_SRC_LOCAL; + (void) strncpy(list->source, + "local", sizeof (list->source)); + } else if (strcmp(sourceval, + ZPROP_SOURCE_VAL_RECVD) == 0) { + sourcetype = ZPROP_SRC_RECEIVED; + (void) strncpy(list->source, + "received", sizeof (list->source)); + } else { + sourcetype = ZPROP_SRC_INHERITED; + (void) strncpy(list->source, + sourceval, sizeof (list->source)); + } + } + (void) strncpy(list->value, + strval, sizeof (list->value)); + return 0; +} + int clear_last_error(libzfs_handle_t *hdl) { zfs_standard_error(hdl, EZFS_SUCCESS, "success"); return 0; diff --git a/zfs.go b/zfs.go index 82dc968..e5a94b4 100644 --- a/zfs.go +++ b/zfs.go @@ -278,6 +278,26 @@ func (d *Dataset) GetProperty(p Prop) (prop Property, err error) { return } +func (d *Dataset) GetUserProperty(p string) (prop Property, err error) { + if d.list == nil { + err = errors.New(msgDatasetIsNil) + return + } + var plist *C.property_list_t + plist = C.new_property_list() + defer C.free_properties(plist) + csp := C.CString(p) + defer C.free(unsafe.Pointer(csp)) + errcode := C.read_user_property(d.list.zh, plist, csp) + if errcode != 0 { + err = LastError() + return + } + prop = Property{Value: 
C.GoString(&(*plist).value[0]), + Source: C.GoString(&(*plist).source[0])} + return +} + // SetProperty set ZFS dataset property to value. Not all properties can be set, // some can be set only at creation time and some are read only. // Always check if returned error and its description. @@ -300,6 +320,22 @@ func (d *Dataset) SetProperty(p Prop, value string) (err error) { return } +func (d *Dataset) SetUserProperty(prop, value string) (err error) { + if d.list == nil { + err = errors.New(msgDatasetIsNil) + return + } + csValue := C.CString(value) + csProp := C.CString(prop) + errcode := C.zfs_prop_set(d.list.zh, csProp, csValue) + C.free(unsafe.Pointer(csValue)) + C.free(unsafe.Pointer(csProp)) + if errcode != 0 { + err = LastError() + } + return +} + // Clone - clones the dataset. The target must be of the same type as // the source. func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err error) { diff --git a/zfs.h b/zfs.h index 40a4cd5..6497961 100644 --- a/zfs.h +++ b/zfs.h @@ -21,6 +21,7 @@ int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first); dataset_list_t *dataset_next(dataset_list_t *dataset); int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop); +int read_user_property(zfs_handle_t *zh, property_list_t *list, const char* prop); int clear_last_error(libzfs_handle_t *libzfs); diff --git a/zfs_test.go b/zfs_test.go index a0d4412..7a64992 100644 --- a/zfs_test.go +++ b/zfs_test.go @@ -74,7 +74,22 @@ func zfsTestDatasetOpen(t *testing.T) { t.Error(err) return } - d.Close() + defer d.Close() + print("PASS\n\n") + + println("TEST Set/GetUserProperty(prop, value string) ... 
") + var p zfs.Property + // Test set/get user property + if err = d.SetUserProperty("go-libzfs:test", "yes"); err != nil { + t.Error(err) + return + } + if p, err = d.GetUserProperty("go-libzfs:test"); err != nil { + t.Error(err) + return + } + println("go-libzfs:test", " = ", + p.Value) print("PASS\n\n") } diff --git a/zpool.h b/zpool.h index c45e5b7..7fbb190 100644 --- a/zpool.h +++ b/zpool.h @@ -7,6 +7,8 @@ #define INT_MAX_NAME 256 #define INT_MAX_VALUE 1024 +#define ZAP_OLDMAXVALUELEN 1024 +#define ZFS_MAX_DATASET_NAME_LEN 256 struct zpool_list { zpool_handle_t *zph; @@ -15,7 +17,7 @@ struct zpool_list { typedef struct property_list { char value[INT_MAX_VALUE]; - char source[INT_MAX_NAME]; + char source[ZFS_MAX_DATASET_NAME_LEN]; int property; void *pnext; } property_list_t; From 07270bcff85407c6fa4da3c151302837318816e3 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 23 Feb 2017 11:29:17 +0100 Subject: [PATCH 23/36] - Improved pointer handling between go and C and elimination of associated memory leaks and potential corruptions --- common.go | 2 +- zfs.c | 14 ++++++++-- zfs.go | 46 ++++++++++++++++++-------------- zfs.h | 2 ++ zpool.go | 78 +++++++++++++++++++++++++++---------------------------- zpool.h | 9 +++++++ 6 files changed, 90 insertions(+), 61 deletions(-) diff --git a/common.go b/common.go index 002a2b5..2cffd35 100644 --- a/common.go +++ b/common.go @@ -26,7 +26,7 @@ import ( // VDevType type of device in the pool type VDevType string -var libzfsHandle *C.struct_libzfs_handle +var libzfsHandle C.libzfs_handle_ptr func init() { libzfsHandle = C.libzfs_init() diff --git a/zfs.c b/zfs.c index f079ce6..9089392 100644 --- a/zfs.c +++ b/zfs.c @@ -20,6 +20,16 @@ dataset_list_t *create_dataset_list_item() { void dataset_list_close(dataset_list_t *list) { zfs_close(list->zh); free(list); + // dataset_list_free(list); +} + +void dataset_list_free(dataset_list_t *list) { + dataset_list_t *next; + while(list) { + next = list->pnext; + free(list); + 
list = next; + } } int dataset_list_callb(zfs_handle_t *dataset, void *data) { @@ -44,7 +54,7 @@ int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first) { *first = zlist; } else { *first = 0; - free(zlist); + dataset_list_free(zlist); } return err; } @@ -62,7 +72,7 @@ int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first) { *first = zlist; } else { *first = 0; - free(zlist); + dataset_list_free(zlist); } return err; } diff --git a/zfs.go b/zfs.go index e5a94b4..10a69f1 100644 --- a/zfs.go +++ b/zfs.go @@ -8,6 +8,7 @@ import "C" import ( "errors" + "fmt" "unsafe" ) @@ -36,17 +37,18 @@ const ( // Dataset - ZFS dataset object type Dataset struct { - list *C.dataset_list_t + list C.dataset_list_ptr Type DatasetType Properties map[Prop]Property Children []Dataset } func (d *Dataset) openChildren() (err error) { - var dataset Dataset + var list C.dataset_list_ptr d.Children = make([]Dataset, 0, 5) - errcode := C.dataset_list_children(d.list.zh, &(dataset.list)) - for dataset.list != nil { + errcode := C.dataset_list_children(d.list.zh, unsafe.Pointer(&list)) + for list != nil { + dataset := Dataset{list: list} dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh)) dataset.Properties = make(map[Prop]Property) err = dataset.ReloadProperties() @@ -54,7 +56,7 @@ func (d *Dataset) openChildren() (err error) { return } d.Children = append(d.Children, dataset) - dataset.list = C.dataset_next(dataset.list) + list = C.dataset_next(list) } if errcode != 0 { err = LastError() @@ -72,7 +74,7 @@ func (d *Dataset) openChildren() (err error) { // (file-systems, volumes or snapshots). 
func DatasetOpenAll() (datasets []Dataset, err error) { var dataset Dataset - errcode := C.dataset_list_root(libzfsHandle, &dataset.list) + errcode := C.dataset_list_root(libzfsHandle, unsafe.Pointer(&dataset.list)) for dataset.list != nil { dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh)) err = dataset.ReloadProperties() @@ -111,6 +113,9 @@ func DatasetOpen(path string) (d Dataset, err error) { if d.list.zh == nil { err = LastError() + if err == nil { + err = fmt.Errorf("dataset not found.") + } return } d.Type = DatasetType(C.zfs_get_type(d.list.zh)) @@ -124,9 +129,9 @@ func DatasetOpen(path string) (d Dataset, err error) { } func datasetPropertiesTonvlist(props map[Prop]Property) ( - cprops *C.nvlist_t, err error) { + cprops C.nvlist_ptr, err error) { // convert properties to nvlist C type - r := C.nvlist_alloc(&cprops, C.NV_UNIQUE_NAME, 0) + r := C.nvlist_alloc(unsafe.Pointer(&cprops), C.NV_UNIQUE_NAME, 0) if r != 0 { err = errors.New("Failed to allocate properties") return @@ -149,7 +154,7 @@ func datasetPropertiesTonvlist(props map[Prop]Property) ( // pool/dataset or pool/parent/dataset func DatasetCreate(path string, dtype DatasetType, props map[Prop]Property) (d Dataset, err error) { - var cprops *C.nvlist_t + var cprops C.nvlist_ptr if cprops, err = datasetPropertiesTonvlist(props); err != nil { return } @@ -170,6 +175,7 @@ func DatasetCreate(path string, dtype DatasetType, func (d *Dataset) Close() { if d.list != nil && d.list.zh != nil { C.dataset_list_close(d.list) + d.list = nil } for _, cd := range d.Children { cd.Close() @@ -242,17 +248,19 @@ func (d *Dataset) ReloadProperties() (err error) { err = errors.New(msgDatasetIsNil) return } - var plist *C.property_list_t - plist = C.new_property_list() - defer C.free_properties(plist) + var plist C.property_list_ptr + d.Properties = make(map[Prop]Property) for prop := DatasetPropType; prop < DatasetNumProps; prop++ { + plist = C.new_property_list() errcode := C.read_dataset_property(d.list.zh, 
plist, C.int(prop)) if errcode != 0 { + C.free_properties(plist) continue } d.Properties[prop] = Property{Value: C.GoString(&(*plist).value[0]), Source: C.GoString(&(*plist).source[0])} + C.free_properties(plist) } return } @@ -264,7 +272,7 @@ func (d *Dataset) GetProperty(p Prop) (prop Property, err error) { err = errors.New(msgDatasetIsNil) return } - var plist *C.property_list_t + var plist C.property_list_ptr plist = C.new_property_list() defer C.free_properties(plist) errcode := C.read_dataset_property(d.list.zh, plist, C.int(p)) @@ -283,7 +291,7 @@ func (d *Dataset) GetUserProperty(p string) (prop Property, err error) { err = errors.New(msgDatasetIsNil) return } - var plist *C.property_list_t + var plist C.property_list_ptr plist = C.new_property_list() defer C.free_properties(plist) csp := C.CString(p) @@ -339,7 +347,7 @@ func (d *Dataset) SetUserProperty(prop, value string) (err error) { // Clone - clones the dataset. The target must be of the same type as // the source. func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err error) { - var cprops *C.nvlist_t + var cprops C.nvlist_ptr if d.list == nil { err = errors.New(msgDatasetIsNil) return @@ -360,7 +368,7 @@ func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err // DatasetSnapshot create dataset snapshot. Set recur to true to snapshot child datasets. func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Dataset, err error) { - var cprops *C.nvlist_t + var cprops C.nvlist_ptr if cprops, err = datasetPropertiesTonvlist(props); err != nil { return } @@ -419,12 +427,12 @@ func (d *Dataset) Rename(newName string, recur, // sets in 'where' argument the current mountpoint, and returns true. Otherwise, // returns false. 
func (d *Dataset) IsMounted() (mounted bool, where string) { - var cw *C.char + var cw C.char_ptr if d.list == nil { return false, "" } - m := C.zfs_is_mounted(d.list.zh, &cw) - defer C.free(unsafe.Pointer(cw)) + m := C.zfs_is_mounted(d.list.zh, unsafe.Pointer(&cw)) + // defer C.free(cw) if m != 0 { return true, C.GoString(cw) } diff --git a/zfs.h b/zfs.h index 6497961..0885e25 100644 --- a/zfs.h +++ b/zfs.h @@ -11,10 +11,12 @@ struct dataset_list { }; typedef struct dataset_list dataset_list_t; +typedef struct dataset_list* dataset_list_ptr; dataset_list_t *create_dataset_list_item(); void dataset_list_close(dataset_list_t *list); +void dataset_list_free(dataset_list_t *list); int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first); int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first); diff --git a/zpool.go b/zpool.go index df9b203..a8dfc6b 100644 --- a/zpool.go +++ b/zpool.go @@ -127,7 +127,7 @@ type ExportedPool struct { // give easy access to listing all available properties. 
It can be refreshed // with up to date values with call to (*Pool) ReloadProperties type Pool struct { - list *C.zpool_list_t + list C.zpool_list_ptr Properties []Property Features map[string]string } @@ -148,14 +148,14 @@ func PoolOpen(name string) (pool Pool, err error) { return } -func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { - var dtype *C.char +func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { + var dtype C.char_ptr var c, children C.uint_t var notpresent C.uint64_t - var vs *C.vdev_stat_t - var ps *C.pool_scan_stat_t - var child **C.nvlist_t - if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_TYPE, &dtype) { + var vs C.vdev_stat_ptr + var ps C.pool_scan_stat_ptr + var child *C.nvlist_ptr + if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_TYPE, unsafe.Pointer(&dtype)) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_TYPE) return } @@ -167,7 +167,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { // Fetch vdev state if 0 != C.nvlist_lookup_uint64_array_vds(nv, C.sZPOOL_CONFIG_VDEV_STATS, - &vs, &c) { + unsafe.Pointer(&vs), &c) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_STATS) return } @@ -193,7 +193,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { // Fetch vdev scan stats if 0 == C.nvlist_lookup_uint64_array_ps(nv, C.sZPOOL_CONFIG_SCAN_STATS, - &ps, &c) { + unsafe.Pointer(&ps), &c) { vdevs.ScanStat.Func = uint64(ps.pss_func) vdevs.ScanStat.State = uint64(ps.pss_state) vdevs.ScanStat.StartTime = uint64(ps.pss_start_time) @@ -209,7 +209,7 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { // Fetch the children if C.nvlist_lookup_nvlist_array(nv, C.sZPOOL_CONFIG_CHILDREN, - &child, &children) != 0 { + unsafe.Pointer(&child), &children) != 0 { return } if children > 0 { @@ -217,8 +217,8 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { } if 
C.nvlist_lookup_uint64(nv, C.sZPOOL_CONFIG_NOT_PRESENT, ¬present) == 0 { - var path *C.char - if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_PATH, &path) { + var path C.char_ptr + if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_PATH, unsafe.Pointer(&path)) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_PATH) return } @@ -249,13 +249,13 @@ func poolGetConfig(name string, nv *C.nvlist_t) (vdevs VDevTree, err error) { // PoolImportSearch - Search pools available to import but not imported. // Returns array of found pools. func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { - var config, nvroot *C.nvlist_t - var cname, msgid, comment *C.char + var config, nvroot C.nvlist_ptr + var cname, msgid, comment C.char_ptr var poolState, guid C.uint64_t var reason C.zpool_status_t var errata C.zpool_errata_t config = nil - var elem *C.nvpair_t + var elem C.nvpair_ptr numofp := len(searchpaths) cpaths := C.alloc_cstrings(C.int(numofp)) defer C.free(unsafe.Pointer(cpaths)) @@ -271,7 +271,7 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { epools = make([]ExportedPool, 0, 1) for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { ep := ExportedPool{} - if C.nvpair_value_nvlist(elem, &config) != 0 { + if C.nvpair_value_nvlist(elem, unsafe.Pointer(&config)) != 0 { err = LastError() return } @@ -281,7 +281,7 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { return } ep.State = PoolState(poolState) - if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_POOL_NAME, &cname) != 0 { + if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_POOL_NAME, unsafe.Pointer(&cname)) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) return } @@ -294,12 +294,12 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { reason = C.zpool_import_status(config, &msgid, &errata) ep.Status = PoolStatus(reason) - if C.nvlist_lookup_string(config, 
C.sZPOOL_CONFIG_COMMENT, &comment) == 0 { + if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_COMMENT, unsafe.Pointer(&comment)) == 0 { ep.Comment = C.GoString(comment) } if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, - &nvroot) != 0 { + unsafe.Pointer(&nvroot)) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) return } @@ -311,8 +311,8 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { func poolSearchImport(q string, searchpaths []string, guid bool) (name string, err error) { - var config *C.nvlist_t - var cname *C.char + var config C.nvlist_ptr + var cname C.char_ptr config = nil errPoolList := errors.New("Failed to list pools") var elem *C.nvpair_t @@ -332,7 +332,7 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { var cq *C.char var tconfig *C.nvlist_t - retcode := C.nvpair_value_nvlist(elem, &tconfig) + retcode := C.nvpair_value_nvlist(elem, unsafe.Pointer(&tconfig)) if retcode != 0 { err = errPoolList return @@ -351,7 +351,7 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, } } else { if retcode = C.nvlist_lookup_string(tconfig, - C.sZPOOL_CONFIG_POOL_NAME, &cq); retcode != 0 { + C.sZPOOL_CONFIG_POOL_NAME, unsafe.Pointer(&cq)); retcode != 0 { err = errPoolList return } @@ -370,7 +370,7 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, if guid { // We need to get name so we can open pool by name if retcode := C.nvlist_lookup_string(config, - C.sZPOOL_CONFIG_POOL_NAME, &cname); retcode != 0 { + C.sZPOOL_CONFIG_POOL_NAME, unsafe.Pointer(&cname)); retcode != 0 { err = errPoolList return } @@ -416,7 +416,7 @@ func PoolImportByGUID(guid string, searchpaths []string) (pool Pool, err error) // anymore. Call Pool.Close() method. 
func PoolOpenAll() (pools []Pool, err error) { var pool Pool - errcode := C.zpool_list(libzfsHandle, &pool.list) + errcode := C.zpool_list(libzfsHandle, unsafe.Pointer(&pool.list)) for pool.list != nil { err = pool.ReloadProperties() if err != nil { @@ -516,7 +516,7 @@ func (pool *Pool) GetProperty(p Prop) (prop Property, err error) { return } var list C.property_list_t - r := C.read_zpool_property(pool.list.zph, &list, C.int(p)) + r := C.read_zpool_property(pool.list.zph, unsafe.Pointer(&list), C.int(p)) if r != 0 { err = LastError() } @@ -632,12 +632,12 @@ func (vdev *VDevTree) isLog() (r C.uint64_t) { return } -func toCPoolProperties(props PoolProperties) (cprops *C.nvlist_t) { +func toCPoolProperties(props PoolProperties) (cprops C.nvlist_ptr) { cprops = nil for prop, value := range props { name := C.zpool_prop_to_name(C.zpool_prop_t(prop)) csPropValue := C.CString(value) - r := C.add_prop_list(name, csPropValue, &cprops, C.boolean_t(1)) + r := C.add_prop_list(name, csPropValue, unsafe.Pointer(&cprops), C.boolean_t(1)) C.free(unsafe.Pointer(csPropValue)) if r != 0 { if cprops != nil { @@ -650,12 +650,12 @@ func toCPoolProperties(props PoolProperties) (cprops *C.nvlist_t) { return } -func toCDatasetProperties(props DatasetProperties) (cprops *C.nvlist_t) { +func toCDatasetProperties(props DatasetProperties) (cprops C.nvlist_ptr) { cprops = nil for prop, value := range props { name := C.zfs_prop_to_name(C.zfs_prop_t(prop)) csPropValue := C.CString(value) - r := C.add_prop_list(name, csPropValue, &cprops, C.boolean_t(0)) + r := C.add_prop_list(name, csPropValue, unsafe.Pointer(&cprops), C.boolean_t(0)) C.free(unsafe.Pointer(csPropValue)) if r != 0 { if cprops != nil { @@ -696,9 +696,9 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, defer C.nvlist_free_array(l2cache) for i, vdev := range vdevs { grouping, mindevs, maxdevs := vdev.isGrouping() - var child *C.nvlist_t + var child C.nvlist_ptr // fmt.Println(vdev.Type) - if r := 
C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 { + if r := C.nvlist_alloc(unsafe.Pointer(&child), C.NV_UNIQUE_NAME, 0); r != 0 { err = errors.New("Failed to allocate vdev") return } @@ -812,8 +812,8 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, func PoolCreate(name string, vdevs []VDevTree, features map[string]string, props PoolProperties, fsprops DatasetProperties) (pool Pool, err error) { // create root vdev nvroot - var nvroot *C.nvlist_t - if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 { + var nvroot C.nvlist_ptr + if r := C.nvlist_alloc(unsafe.Pointer(&nvroot), C.NV_UNIQUE_NAME, 0); r != 0 { err = errors.New("Failed to allocate root vdev") return } @@ -860,7 +860,7 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, for fname, fval := range features { csName := C.CString(fmt.Sprintf("feature@%s", fname)) csVal := C.CString(fval) - r := C.add_prop_list(csName, csVal, &cprops, + r := C.add_prop_list(csName, csVal, unsafe.Pointer(&cprops), C.boolean_t(1)) C.free(unsafe.Pointer(csName)) C.free(unsafe.Pointer(csVal)) @@ -890,14 +890,14 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, // Status get pool status. Let you check if pool healthy. 
func (pool *Pool) Status() (status PoolStatus, err error) { - var msgid *C.char + var msgid C.char_ptr var reason C.zpool_status_t var errata C.zpool_errata_t if pool.list == nil { err = errors.New(msgPoolIsNil) return } - reason = C.zpool_get_status(pool.list.zph, &msgid, &errata) + reason = C.zpool_get_status(pool.list.zph, unsafe.Pointer(&msgid), &errata) status = PoolStatus(reason) return } @@ -948,7 +948,7 @@ func (pool *Pool) ExportForce(log string) (err error) { // VDevTree - Fetch pool's current vdev tree configuration, state and stats func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { - var nvroot *C.nvlist_t + var nvroot C.nvlist_ptr var poolName string config := C.zpool_get_config(pool.list.zph, nil) if config == nil { @@ -956,7 +956,7 @@ func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { return } if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, - &nvroot) != 0 { + unsafe.Pointer(&nvroot)) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) return } diff --git a/zpool.h b/zpool.h index 7fbb190..3aaf3c8 100644 --- a/zpool.h +++ b/zpool.h @@ -23,6 +23,15 @@ typedef struct property_list { } property_list_t; typedef struct zpool_list zpool_list_t; +typedef struct zpool_list* zpool_list_ptr; +typedef struct libzfs_handle* libzfs_handle_ptr; +typedef struct nvlist* nvlist_ptr; +typedef struct property_list *property_list_ptr; +typedef struct pool_scan_stat* pool_scan_stat_ptr; +typedef struct nvpair* nvpair_ptr; + +typedef struct vdev_stat* vdev_stat_ptr; +typedef char* char_ptr; property_list_t *new_property_list(); From 89e00d621882a0d6e2603f640e8c0719bf150d32 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Fri, 17 Mar 2017 14:22:17 +0100 Subject: [PATCH 24/36] - Implement dataset/volume clone promote function --- zfs.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/zfs.go b/zfs.go index 10a69f1..2be0be1 100644 --- a/zfs.go +++ b/zfs.go @@ -407,6 +407,18 @@ func (d *Dataset) Rollback(snap 
*Dataset, force bool) (err error) { return } +// Promote promotes dataset clone +func (d *Dataset) Promote() (err error) { + if d.list == nil { + err = errors.New(msgDatasetIsNil) + return + } + if errc := C.zfs_promote(d.list.zh); errc != 0 { + err = LastError() + } + return +} + // Rename dataset func (d *Dataset) Rename(newName string, recur, forceUnmount bool) (err error) { From fa9190091585054048f708e5d447b7db4ada7be7 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Tue, 25 Apr 2017 13:24:34 +0200 Subject: [PATCH 25/36] - Fix bug not fetching disk path in pool configuration when disk is unavailable (plugged off) --- zpool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zpool.go b/zpool.go index a8dfc6b..940f8b0 100644 --- a/zpool.go +++ b/zpool.go @@ -210,13 +210,13 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { // Fetch the children if C.nvlist_lookup_nvlist_array(nv, C.sZPOOL_CONFIG_CHILDREN, unsafe.Pointer(&child), &children) != 0 { - return + children = 0 } if children > 0 { vdevs.Devices = make([]VDevTree, 0, children) } if C.nvlist_lookup_uint64(nv, C.sZPOOL_CONFIG_NOT_PRESENT, - ¬present) == 0 { + ¬present) == 0 || notpresent != 0 { var path C.char_ptr if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_PATH, unsafe.Pointer(&path)) { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_PATH) From 8fd0833477ecaa9cdeb609de0e2789f3510ac35d Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Fri, 2 Jun 2017 08:42:14 +0200 Subject: [PATCH 26/36] - Fix issue not building on go 1.8 , and some more improvements --- common.c | 56 +++++++ common.go | 13 +- common.h | 37 +++++ zfs.c | 147 ++++++++++++---- zfs.go | 119 ++++++------- zfs.h | 26 ++- zpool.c | 451 ++++++++++++++++++++++++-------------------------- zpool.go | 152 ++++++++--------- zpool.h | 64 ++++--- zpool_test.go | 2 +- 10 files changed, 599 insertions(+), 468 deletions(-) create mode 100644 common.c create mode 100644 common.h diff 
--git a/common.c b/common.c new file mode 100644 index 0000000..cec48de --- /dev/null +++ b/common.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +#include "common.h" + +libzfs_handle_ptr libzfsHandle; + +int go_libzfs_init() { + libzfsHandle = libzfs_init(); + return 0; +} + +int libzfs_last_error() { + return libzfs_errno(libzfsHandle); +} + +const char *libzfs_last_error_str() { + return libzfs_error_description(libzfsHandle); +} + +int libzfs_clear_last_error() { + zfs_standard_error(libzfsHandle, EZFS_SUCCESS, "success"); + return 0; +} + +property_list_t *new_property_list() { + property_list_t *r = malloc(sizeof(property_list_t)); + memset(r, 0, sizeof(property_list_t)); + return r; +} + +void free_properties(property_list_t *root) { + if (root != 0) { + property_list_t *tmp = 0; + do { + tmp = root->pnext; + free(root); + root = tmp; + } while(tmp); + } +} + +nvlist_ptr new_property_nvlist() { + nvlist_ptr props = NULL; + int r = nvlist_alloc(&props, NV_UNIQUE_NAME, 0); + if ( r != 0 ) { + return NULL; + } + return props; +} + +int property_nvlist_add(nvlist_ptr list, const char *prop, const char *value) { + return nvlist_add_string(list, prop, value); +} diff --git a/common.go b/common.go index 2cffd35..0b0ab88 100644 --- a/common.go +++ b/common.go @@ -14,6 +14,7 @@ package zfs #include #include +#include "common.h" #include "zpool.h" #include "zfs.h" */ @@ -26,10 +27,8 @@ import ( // VDevType type of device in the pool type VDevType string -var libzfsHandle C.libzfs_handle_ptr - func init() { - libzfsHandle = C.libzfs_init() + C.go_libzfs_init() return } @@ -256,17 +255,13 @@ const ( // LastError get last underlying libzfs error description if any func LastError() (err error) { - errno := C.libzfs_errno(libzfsHandle) - if errno == 0 { - return nil - } - return errors.New(C.GoString(C.libzfs_error_description(libzfsHandle))) + return errors.New(C.GoString(C.libzfs_last_error_str())) } // ClearLastError force clear of any last error set by 
undeliying libzfs func ClearLastError() (err error) { err = LastError() - C.clear_last_error(libzfsHandle) + C.libzfs_clear_last_error() return } diff --git a/common.h b/common.h new file mode 100644 index 0000000..105554f --- /dev/null +++ b/common.h @@ -0,0 +1,37 @@ +/* C wrappers around some zfs calls and C in general that should simplify + * using libzfs from go language, make go code shorter and more readable. + */ + +#define INT_MAX_NAME 256 +#define INT_MAX_VALUE 1024 +#define ZAP_OLDMAXVALUELEN 1024 +#define ZFS_MAX_DATASET_NAME_LEN 256 + +typedef struct property_list { + char value[INT_MAX_VALUE]; + char source[ZFS_MAX_DATASET_NAME_LEN]; + int property; + void *pnext; +} property_list_t; + +typedef struct libzfs_handle* libzfs_handle_ptr; +typedef struct nvlist* nvlist_ptr; +typedef struct property_list *property_list_ptr; +typedef struct nvpair* nvpair_ptr; +typedef struct vdev_stat* vdev_stat_ptr; +typedef char* char_ptr; + +extern libzfs_handle_ptr libzfsHandle; + +int go_libzfs_init(); + +int libzfs_last_error(); +const char *libzfs_last_error_str(); +int libzfs_clear_last_error(); + +property_list_t *new_property_list(); +void free_properties(property_list_t *root); + +nvlist_ptr new_property_nvlist(); +int property_nvlist_add(nvlist_ptr ptr, const char* prop, const char *value); + diff --git a/zfs.c b/zfs.c index 9089392..a54affc 100644 --- a/zfs.c +++ b/zfs.c @@ -7,6 +7,7 @@ #include #include +#include "common.h" #include "zpool.h" #include "zfs.h" @@ -46,59 +47,144 @@ int dataset_list_callb(zfs_handle_t *dataset, void *data) { return 0; } -int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first) { +dataset_list_ptr dataset_list_root() { int err = 0; dataset_list_t *zlist = create_dataset_list_item(); - err = zfs_iter_root(libzfs, dataset_list_callb, &zlist); - if ( zlist->zh ) { - *first = zlist; - } else { - *first = 0; + err = zfs_iter_root(libzfsHandle, dataset_list_callb, &zlist); + if ( err != 0 || zlist->zh == NULL) { 
dataset_list_free(zlist); + return NULL; } - return err; + return zlist; } -dataset_list_t *dataset_next(dataset_list_t *dataset) { +dataset_list_ptr dataset_next(dataset_list_t *dataset) { return dataset->pnext; } - -int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first) { - int err = 0; - dataset_list_t *zlist = create_dataset_list_item(); - err = zfs_iter_children(zfs, dataset_list_callb, &zlist); - if ( zlist->zh ) { - *first = zlist; - } else { - *first = 0; - dataset_list_free(zlist); - } - return err; +int dataset_type(dataset_list_ptr dataset) { + return zfs_get_type(dataset->zh); } -int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) { +dataset_list_ptr dataset_open(const char *path) { + dataset_list_ptr list = create_dataset_list_item(); + list->zh = zfs_open(libzfsHandle, path, 0xF); + if (list->zh == NULL) { + dataset_list_free(list); + list = NULL; + } + return list; +} + +int dataset_create(const char *path, zfs_type_t type, nvlist_ptr props) { + return zfs_create(libzfsHandle, path, type, props); +} + +int dataset_destroy(dataset_list_ptr dataset, boolean_t defer) { + return zfs_destroy(dataset->zh, defer); +} + +dataset_list_t *dataset_list_children(dataset_list_t *dataset) { + int err = 0; + dataset_list_t *zlist = create_dataset_list_item(); + err = zfs_iter_children(dataset->zh, dataset_list_callb, &zlist); + if ( err != 0 || zlist->zh == NULL) { + dataset_list_free(zlist); + return NULL; + } + return zlist; +} + +zpool_list_ptr dataset_get_pool(dataset_list_ptr dataset) { + zpool_list_ptr pool = create_zpool_list_item(); + if(pool != NULL) { + pool->zph = zfs_get_pool_handle(dataset->zh); + } + return pool; +} + +int dataset_prop_set(dataset_list_ptr dataset, zfs_prop_t prop, const char *value) { + return zfs_prop_set(dataset->zh, zfs_prop_to_name(prop), value); +} + +int dataset_user_prop_set(dataset_list_ptr dataset, const char *prop, const char *value) { + return zfs_prop_set(dataset->zh, prop, value); 
+} + +int dataset_clone(dataset_list_ptr dataset, const char *target, nvlist_ptr props) { + return zfs_clone(dataset->zh, target, props); +} + +int dataset_snapshot(const char *path, boolean_t recur, nvlist_ptr props) { + return zfs_snapshot(libzfsHandle, path, recur, props); +} + +int dataset_rollback(dataset_list_ptr dataset, dataset_list_ptr snapshot, boolean_t force) { + return zfs_rollback(dataset->zh, snapshot->zh, force); +} + +int dataset_promote(dataset_list_ptr dataset) { + return zfs_promote(dataset->zh); +} + +int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t recur, boolean_t force_unm) { + return zfs_rename(dataset->zh, new_name, recur, force_unm); +} + +const char *dataset_is_mounted(dataset_list_ptr dataset){ + char *mp; + if (0 != zfs_is_mounted(dataset->zh, &mp)) { + return NULL; + } + return mp; +} + +int dataset_mount(dataset_list_ptr dataset, const char *options, int flags) { + return zfs_mount(dataset->zh, options, flags); +} + +int dataset_unmount(dataset_list_ptr dataset, int flags) { + return zfs_unmount(dataset->zh, NULL, flags); +} + +int dataset_unmountall(dataset_list_ptr dataset, int flags) { + return zfs_unmountall(dataset->zh, flags); +} + +const char *dataset_get_name(dataset_list_ptr ds) { + return zfs_get_name(ds->zh); +} + +//int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) { +property_list_t *read_dataset_property(dataset_list_t *dataset, int prop) { int r = 0; zprop_source_t source; char statbuf[INT_MAX_VALUE]; + property_list_ptr list; + list = new_property_list(); - r = zfs_prop_get(zh, prop, + r = zfs_prop_get(dataset->zh, prop, list->value, INT_MAX_VALUE, &source, statbuf, INT_MAX_VALUE, 1); if (r == 0) { // strcpy(list->name, zpool_prop_to_name(prop)); zprop_source_tostr(list->source, source); + list->property = (int)prop; + } else { + free_properties(list); + list = NULL; } - list->property = (int)prop; - return r; + return list; } -int read_user_property(zfs_handle_t 
*zh, property_list_t *list, const char *prop) { - nvlist_t *user_props = zfs_get_user_props(zh); +// int read_user_property(zfs_handle_t *zh, property_list_t *list, const char *prop) { +property_list_t *read_user_property(dataset_list_t *dataset, const char* prop) { + nvlist_t *user_props = zfs_get_user_props(dataset->zh); nvlist_t *propval; zprop_source_t sourcetype; char *strval; char *sourceval; // char source[ZFS_MAX_DATASET_NAME_LEN]; + property_list_ptr list = new_property_list(); if (nvlist_lookup_nvlist(user_props, prop, &propval) != 0) { @@ -113,7 +199,7 @@ int read_user_property(zfs_handle_t *zh, property_list_t *list, const char *prop ZPROP_SOURCE, &sourceval) == 0); if (strcmp(sourceval, - zfs_get_name(zh)) == 0) { + zfs_get_name(dataset->zh)) == 0) { sourcetype = ZPROP_SRC_LOCAL; (void) strncpy(list->source, "local", sizeof (list->source)); @@ -130,12 +216,7 @@ int read_user_property(zfs_handle_t *zh, property_list_t *list, const char *prop } (void) strncpy(list->value, strval, sizeof (list->value)); - return 0; -} - -int clear_last_error(libzfs_handle_t *hdl) { - zfs_standard_error(hdl, EZFS_SUCCESS, "success"); - return 0; + return list; } char** alloc_cstrings(int size) { diff --git a/zfs.go b/zfs.go index 2be0be1..e4ec4f3 100644 --- a/zfs.go +++ b/zfs.go @@ -2,6 +2,7 @@ package zfs // #include // #include +// #include "common.h" // #include "zpool.h" // #include "zfs.h" import "C" @@ -44,12 +45,11 @@ type Dataset struct { } func (d *Dataset) openChildren() (err error) { - var list C.dataset_list_ptr d.Children = make([]Dataset, 0, 5) - errcode := C.dataset_list_children(d.list.zh, unsafe.Pointer(&list)) + list := C.dataset_list_children(d.list) for list != nil { dataset := Dataset{list: list} - dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh)) + dataset.Type = DatasetType(C.dataset_type(d.list)) dataset.Properties = make(map[Prop]Property) err = dataset.ReloadProperties() if err != nil { @@ -58,10 +58,6 @@ func (d *Dataset) openChildren() 
(err error) { d.Children = append(d.Children, dataset) list = C.dataset_next(list) } - if errcode != 0 { - err = LastError() - return - } for ci := range d.Children { if err = d.Children[ci].openChildren(); err != nil { return @@ -74,9 +70,9 @@ func (d *Dataset) openChildren() (err error) { // (file-systems, volumes or snapshots). func DatasetOpenAll() (datasets []Dataset, err error) { var dataset Dataset - errcode := C.dataset_list_root(libzfsHandle, unsafe.Pointer(&dataset.list)) + dataset.list = C.dataset_list_root() for dataset.list != nil { - dataset.Type = DatasetType(C.zfs_get_type(dataset.list.zh)) + dataset.Type = DatasetType(C.dataset_type(dataset.list)) err = dataset.ReloadProperties() if err != nil { return @@ -84,10 +80,6 @@ func DatasetOpenAll() (datasets []Dataset, err error) { datasets = append(datasets, dataset) dataset.list = C.dataset_next(dataset.list) } - if errcode != 0 { - err = LastError() - return - } for ci := range datasets { if err = datasets[ci].openChildren(); err != nil { return @@ -106,22 +98,23 @@ func DatasetCloseAll(datasets []Dataset) { // DatasetOpen open dataset and all of its recursive children datasets func DatasetOpen(path string) (d Dataset, err error) { - d.list = C.create_dataset_list_item() csPath := C.CString(path) - d.list.zh = C.zfs_open(libzfsHandle, csPath, 0xF) + d.list = C.dataset_open(csPath) C.free(unsafe.Pointer(csPath)) - if d.list.zh == nil { + if d.list == nil || d.list.zh == nil { err = LastError() if err == nil { err = fmt.Errorf("dataset not found.") } + println("open failed") return } - d.Type = DatasetType(C.zfs_get_type(d.list.zh)) + d.Type = DatasetType(C.dataset_type(d.list)) d.Properties = make(map[Prop]Property) err = d.ReloadProperties() if err != nil { + println("reload properties failed") return } err = d.openChildren() @@ -131,16 +124,15 @@ func DatasetOpen(path string) (d Dataset, err error) { func datasetPropertiesTonvlist(props map[Prop]Property) ( cprops C.nvlist_ptr, err error) { // 
convert properties to nvlist C type - r := C.nvlist_alloc(unsafe.Pointer(&cprops), C.NV_UNIQUE_NAME, 0) - if r != 0 { + cprops = C.new_property_nvlist() + if cprops == nil { err = errors.New("Failed to allocate properties") return } for prop, value := range props { csValue := C.CString(value.Value) - r := C.nvlist_add_string( - cprops, C.zfs_prop_to_name( - C.zfs_prop_t(prop)), csValue) + r := C.property_nvlist_add( + cprops, C.zfs_prop_to_name(C.zfs_prop_t(prop)), csValue) C.free(unsafe.Pointer(csValue)) if r != 0 { err = errors.New("Failed to convert property") @@ -161,13 +153,13 @@ func DatasetCreate(path string, dtype DatasetType, defer C.nvlist_free(cprops) csPath := C.CString(path) - errcode := C.zfs_create(libzfsHandle, csPath, - C.zfs_type_t(dtype), cprops) + errcode := C.dataset_create(csPath, C.zfs_type_t(dtype), cprops) C.free(unsafe.Pointer(csPath)) if errcode != 0 { err = LastError() + return } - return + return DatasetOpen(path) } // Close close dataset and all its recursive children datasets (close handle @@ -184,7 +176,8 @@ func (d *Dataset) Close() { // Destroy destroys the dataset. The caller must make sure that the filesystem // isn't mounted, and that there are no active dependents. Set Defer argument -// to true to defer destruction for when dataset is not in use. +// to true to defer destruction for when dataset is not in use. Call Close() to +// cleanup memory. 
func (d *Dataset) Destroy(Defer bool) (err error) { if len(d.Children) > 0 { path, e := d.Path() @@ -200,7 +193,7 @@ func (d *Dataset) Destroy(Defer bool) (err error) { return } if d.list != nil { - if ec := C.zfs_destroy(d.list.zh, booleanT(Defer)); ec != 0 { + if ec := C.dataset_destroy(d.list, booleanT(Defer)); ec != 0 { err = LastError() } } else { @@ -232,9 +225,8 @@ func (d *Dataset) Pool() (p Pool, err error) { err = errors.New(msgDatasetIsNil) return } - p.list = C.create_zpool_list_item() - p.list.zph = C.zfs_get_pool_handle(d.list.zh) - if p.list != nil { + p.list = C.dataset_get_pool(d.list) + if p.list != nil && p.list.zph != nil { err = p.ReloadProperties() return } @@ -248,14 +240,10 @@ func (d *Dataset) ReloadProperties() (err error) { err = errors.New(msgDatasetIsNil) return } - var plist C.property_list_ptr - d.Properties = make(map[Prop]Property) for prop := DatasetPropType; prop < DatasetNumProps; prop++ { - plist = C.new_property_list() - errcode := C.read_dataset_property(d.list.zh, plist, C.int(prop)) - if errcode != 0 { - C.free_properties(plist) + plist := C.read_dataset_property(d.list, C.int(prop)) + if plist == nil { continue } d.Properties[prop] = Property{Value: C.GoString(&(*plist).value[0]), @@ -272,14 +260,12 @@ func (d *Dataset) GetProperty(p Prop) (prop Property, err error) { err = errors.New(msgDatasetIsNil) return } - var plist C.property_list_ptr - plist = C.new_property_list() - defer C.free_properties(plist) - errcode := C.read_dataset_property(d.list.zh, plist, C.int(p)) - if errcode != 0 { + plist := C.read_dataset_property(d.list, C.int(p)) + if plist == nil { err = LastError() return } + defer C.free_properties(plist) prop = Property{Value: C.GoString(&(*plist).value[0]), Source: C.GoString(&(*plist).source[0])} d.Properties[p] = prop @@ -291,16 +277,14 @@ func (d *Dataset) GetUserProperty(p string) (prop Property, err error) { err = errors.New(msgDatasetIsNil) return } - var plist C.property_list_ptr - plist = 
C.new_property_list() - defer C.free_properties(plist) csp := C.CString(p) defer C.free(unsafe.Pointer(csp)) - errcode := C.read_user_property(d.list.zh, plist, csp) - if errcode != 0 { + plist := C.read_user_property(d.list, csp) + if plist == nil { err = LastError() return } + defer C.free_properties(plist) prop = Property{Value: C.GoString(&(*plist).value[0]), Source: C.GoString(&(*plist).source[0])} return @@ -315,8 +299,7 @@ func (d *Dataset) SetProperty(p Prop, value string) (err error) { return } csValue := C.CString(value) - errcode := C.zfs_prop_set(d.list.zh, C.zfs_prop_to_name( - C.zfs_prop_t(p)), csValue) + errcode := C.dataset_prop_set(d.list, C.zfs_prop_t(p), csValue) C.free(unsafe.Pointer(csValue)) if errcode != 0 { err = LastError() @@ -335,7 +318,7 @@ func (d *Dataset) SetUserProperty(prop, value string) (err error) { } csValue := C.CString(value) csProp := C.CString(prop) - errcode := C.zfs_prop_set(d.list.zh, csProp, csValue) + errcode := C.dataset_user_prop_set(d.list, csProp, csValue) C.free(unsafe.Pointer(csValue)) C.free(unsafe.Pointer(csProp)) if errcode != 0 { @@ -358,7 +341,7 @@ func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err defer C.nvlist_free(cprops) csTarget := C.CString(target) defer C.free(unsafe.Pointer(csTarget)) - if errc := C.zfs_clone(d.list.zh, csTarget, cprops); errc != 0 { + if errc := C.dataset_clone(d.list, csTarget, cprops); errc != 0 { err = LastError() return } @@ -375,7 +358,7 @@ func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Datas defer C.nvlist_free(cprops) csPath := C.CString(path) defer C.free(unsafe.Pointer(csPath)) - if errc := C.zfs_snapshot(libzfsHandle, csPath, booleanT(recur), cprops); errc != 0 { + if errc := C.dataset_snapshot(csPath, booleanT(recur), cprops); errc != 0 { err = LastError() return } @@ -389,7 +372,7 @@ func (d *Dataset) Path() (path string, err error) { err = errors.New(msgDatasetIsNil) return } - name := C.zfs_get_name(d.list.zh) + 
name := C.dataset_get_name(d.list) path = C.GoString(name) return } @@ -400,10 +383,11 @@ func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { err = errors.New(msgDatasetIsNil) return } - if errc := C.zfs_rollback(d.list.zh, - snap.list.zh, booleanT(force)); errc != 0 { + if errc := C.dataset_rollback(d.list, snap.list, booleanT(force)); errc != 0 { err = LastError() + return } + d.ReloadProperties() return } @@ -413,9 +397,11 @@ func (d *Dataset) Promote() (err error) { err = errors.New(msgDatasetIsNil) return } - if errc := C.zfs_promote(d.list.zh); errc != 0 { + if errc := C.dataset_promote(d.list); errc != 0 { err = LastError() + return } + d.ReloadProperties() return } @@ -428,10 +414,12 @@ func (d *Dataset) Rename(newName string, recur, } csNewName := C.CString(newName) defer C.free(unsafe.Pointer(csNewName)) - if errc := C.zfs_rename(d.list.zh, csNewName, + if errc := C.dataset_rename(d.list, csNewName, booleanT(recur), booleanT(forceUnmount)); errc != 0 { err = LastError() + return } + d.ReloadProperties() return } @@ -439,16 +427,15 @@ func (d *Dataset) Rename(newName string, recur, // sets in 'where' argument the current mountpoint, and returns true. Otherwise, // returns false. func (d *Dataset) IsMounted() (mounted bool, where string) { - var cw C.char_ptr if d.list == nil { - return false, "" + return } - m := C.zfs_is_mounted(d.list.zh, unsafe.Pointer(&cw)) - // defer C.free(cw) - if m != 0 { - return true, C.GoString(cw) + mp := C.dataset_is_mounted(d.list) + // defer C.free(mp) + if mounted = (mp != nil); mounted { + where = C.GoString(mp) } - return false, "" + return } // Mount the given filesystem. 
@@ -459,7 +446,7 @@ func (d *Dataset) Mount(options string, flags int) (err error) { } csOptions := C.CString(options) defer C.free(unsafe.Pointer(csOptions)) - if ec := C.zfs_mount(d.list.zh, csOptions, C.int(flags)); ec != 0 { + if ec := C.dataset_mount(d.list, csOptions, C.int(flags)); ec != 0 { err = LastError() } return @@ -471,7 +458,7 @@ func (d *Dataset) Unmount(flags int) (err error) { err = errors.New(msgDatasetIsNil) return } - if ec := C.zfs_unmount(d.list.zh, nil, C.int(flags)); ec != 0 { + if ec := C.dataset_unmount(d.list, C.int(flags)); ec != 0 { err = LastError() } return @@ -484,7 +471,7 @@ func (d *Dataset) UnmountAll(flags int) (err error) { err = errors.New(msgDatasetIsNil) return } - if ec := C.zfs_unmountall(d.list.zh, C.int(flags)); ec != 0 { + if ec := C.dataset_unmountall(d.list, C.int(flags)); ec != 0 { err = LastError() } return diff --git a/zfs.h b/zfs.h index 0885e25..d70566e 100644 --- a/zfs.h +++ b/zfs.h @@ -18,14 +18,30 @@ dataset_list_t *create_dataset_list_item(); void dataset_list_close(dataset_list_t *list); void dataset_list_free(dataset_list_t *list); -int dataset_list_root(libzfs_handle_t *libzfs, dataset_list_t **first); -int dataset_list_children(zfs_handle_t *zfs, dataset_list_t **first); +dataset_list_t* dataset_list_root(); +dataset_list_t* dataset_list_children(dataset_list_t *dataset); dataset_list_t *dataset_next(dataset_list_t *dataset); +int dataset_type(dataset_list_ptr dataset); -int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop); -int read_user_property(zfs_handle_t *zh, property_list_t *list, const char* prop); +dataset_list_ptr dataset_open(const char *path); +int dataset_create(const char *path, zfs_type_t type, nvlist_ptr props); +int dataset_destroy(dataset_list_ptr dataset, boolean_t defer); +zpool_list_ptr dataset_get_pool(dataset_list_ptr dataset); +int dataset_prop_set(dataset_list_ptr dataset, zfs_prop_t prop, const char *value); +int dataset_user_prop_set(dataset_list_ptr 
dataset, const char *prop, const char *value); +int dataset_clone(dataset_list_ptr dataset, const char *target, nvlist_ptr props); +int dataset_snapshot(const char *path, boolean_t recur, nvlist_ptr props); +int dataset_rollback(dataset_list_ptr dataset, dataset_list_ptr snapshot, boolean_t force); +int dataset_promote(dataset_list_ptr dataset); +int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t recur, boolean_t force_unm); +const char* dataset_is_mounted(dataset_list_ptr dataset); +int dataset_mount(dataset_list_ptr dataset, const char *options, int flags); +int dataset_unmount(dataset_list_ptr dataset, int flags); +int dataset_unmountall(dataset_list_ptr dataset, int flags); +const char *dataset_get_name(dataset_list_ptr ds); -int clear_last_error(libzfs_handle_t *libzfs); +property_list_t *read_dataset_property(dataset_list_t *dataset, int prop); +property_list_t *read_user_property(dataset_list_t *dataset, const char* prop); char** alloc_cstrings(int size); void strings_setat(char **a, int at, char *v); diff --git a/zpool.c b/zpool.c index 5755312..c63665a 100644 --- a/zpool.c +++ b/zpool.c @@ -7,6 +7,7 @@ #include #include +#include "common.h" #include "zpool.h" char *sZPOOL_CONFIG_VERSION = ZPOOL_CONFIG_VERSION; @@ -109,26 +110,24 @@ int zpool_list_callb(zpool_handle_t *pool, void *data) { return 0; } -int zpool_list(libzfs_handle_t *libzfs, zpool_list_t **first) { +zpool_list_ptr zpool_list_openall() { int err = 0; zpool_list_t *zlist = create_zpool_list_item(); - err = zpool_iter(libzfs, zpool_list_callb, &zlist); - if ( zlist->zph ) { - *first = zlist; - } else { - *first = 0; - free(zlist); + err = zpool_iter(libzfsHandle, zpool_list_callb, &zlist); + if ( err != 0 || zlist->zph == NULL ) { + zpool_list_free(zlist); + zlist = NULL; } - return err; + return zlist; } -zpool_list_t* zpool_list_open(libzfs_handle_t *libzfs, const char *name) { +zpool_list_t* zpool_list_open(const char *name) { zpool_list_t *zlist = 
create_zpool_list_item(); - zlist->zph = zpool_open(libzfs, name); + zlist->zph = zpool_open(libzfsHandle, name); if ( zlist->zph ) { return zlist; } else { - free(zlist); + zpool_list_free(zlist); } return 0; } @@ -137,26 +136,18 @@ zpool_list_t *zpool_next(zpool_list_t *pool) { return pool->pnext; } +void zpool_list_free(zpool_list_t *list) { + zpool_list_ptr next; + while(list) { + next = list->pnext; + free(list); + list = next; + } +} + void zpool_list_close(zpool_list_t *pool) { zpool_close(pool->zph); - free(pool); -} - -property_list_t *new_property_list() { - property_list_t *r = malloc(sizeof(property_list_t)); - memset(r, 0, sizeof(property_list_t)); - return r; -} - -void free_properties(property_list_t *root) { - if (root != 0) { - property_list_t *tmp = 0; - do { - tmp = root->pnext; - free(root); - root = tmp; - } while(tmp); - } + zpool_list_free(pool); } property_list_t *next_property(property_list_t *list) { @@ -191,161 +182,68 @@ void zprop_source_tostr(char *dst, zprop_source_t source) { } -int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop) { +property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop) { int r = 0; zprop_source_t source; + property_list_ptr list = new_property_list(); - r = zpool_get_prop(zh, prop, + r = zpool_get_prop(pool->zph, prop, list->value, INT_MAX_VALUE, &source); if (r == 0) { // strcpy(list->name, zpool_prop_to_name(prop)); zprop_source_tostr(list->source, source); + } else { + free_properties(list); + return NULL; } list->property = (int)prop; - return r; + return list; } -int read_append_zpool_property(zpool_handle_t *zh, property_list_t **proot, - zpool_prop_t prop) { +property_list_ptr read_append_zpool_property(zpool_list_ptr pool, property_list_ptr proot, zpool_prop_t prop) { int r = 0; - property_list_t *newitem = NULL, *root = *proot; - newitem = new_property_list(); + property_list_t *newitem = NULL; - r = read_zpool_property(zh, newitem, prop); - // printf("p: %s %s %s\n", 
newitem->name, newitem->value, newitem->source); - newitem->pnext = root; - *proot = root = newitem; - if (r != 0) { - free_properties(root); - *proot = NULL; + newitem = read_zpool_property(pool, prop); + if (newitem == NULL) { + return proot; } - return r; + // printf("p: %s %s %s\n", newitem->name, newitem->value, newitem->source); + newitem->pnext = proot; + proot = newitem; + + return proot; } -property_list_t *read_zpool_properties(zpool_handle_t *zh) { +property_list_t *read_zpool_properties(zpool_list_ptr pool) { // read pool name as first property property_list_t *root = NULL, *list = NULL; - int r = read_append_zpool_property(zh, &root, ZPOOL_PROP_NAME); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_SIZE); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_CAPACITY); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ALTROOT); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_HEALTH); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_GUID); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_VERSION); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_BOOTFS); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DELEGATION); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_AUTOREPLACE); - if (r != 0) { - return 0; - } - - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_CACHEFILE); - if (r != 0) { - return 0; - } - - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FAILUREMODE); - if (r != 0) { - return 0; - } - - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_LISTSNAPS); - if (r != 0) { - return 0; - } - - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_AUTOEXPAND); - if (r != 0) { - 
return 0; - } - - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DEDUPDITTO); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_DEDUPRATIO); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FREE); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ALLOCATED); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_READONLY); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_ASHIFT); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_COMMENT); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_EXPANDSZ); - if (r != 0) { - return 0; - } - - r = read_append_zpool_property(zh, &root, ZPOOL_PROP_FREEING); - if (r != 0) { - return 0; - } + root = read_append_zpool_property(pool, root, ZPOOL_PROP_NAME); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_SIZE); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_CAPACITY); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_ALTROOT); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_HEALTH); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_GUID); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_VERSION); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_BOOTFS); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_DELEGATION); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_AUTOREPLACE); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_CACHEFILE); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_FAILUREMODE); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_LISTSNAPS); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_AUTOEXPAND); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_DEDUPDITTO); + root = 
read_append_zpool_property(pool, root, ZPOOL_PROP_DEDUPRATIO); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_FREE); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_ALLOCATED); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_READONLY); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_ASHIFT); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_COMMENT); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_EXPANDSZ); + root = read_append_zpool_property(pool, root, ZPOOL_PROP_FREEING); + list = new_property_list(); @@ -370,85 +268,75 @@ const char *gettext(const char *txt) { /* * Add a property pair (name, string-value) into a property nvlist. */ -int -add_prop_list(const char *propname, char *propval, nvlist_t **props, - boolean_t poolprop) { - zpool_prop_t prop = ZPROP_INVAL; - zfs_prop_t fprop; - nvlist_t *proplist; - const char *normnm; - char *strval; +// int +// add_prop_list(const char *propname, char *propval, nvlist_t **props, +// boolean_t poolprop) { +// zpool_prop_t prop = ZPROP_INVAL; +// zfs_prop_t fprop; +// nvlist_t *proplist; +// const char *normnm; +// char *strval; - if (*props == NULL && - nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) { - (void) snprintf(_lasterr_, 1024, "internal error: out of memory"); - return (1); - } +// if (*props == NULL && +// nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) { +// (void) snprintf(_lasterr_, 1024, "internal error: out of memory"); +// return (1); +// } - proplist = *props; +// proplist = *props; - if (poolprop) { - const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION); +// if (poolprop) { +// const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION); - if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL && - !zpool_prop_feature(propname)) { - (void) snprintf(_lasterr_, 1024, "property '%s' is " - "not a valid pool property", propname); - return (2); - } +// if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL && +// 
!zpool_prop_feature(propname)) { +// (void) snprintf(_lasterr_, 1024, "property '%s' is " +// "not a valid pool property", propname); +// return (2); +// } - /* - * feature@ properties and version should not be specified - * at the same time. - */ - // if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) && - // nvlist_exists(proplist, vname)) || - // (prop == ZPOOL_PROP_VERSION && - // prop_list_contains_feature(proplist))) { - // (void) fprintf(stderr, gettext("'feature@' and " - // "'version' properties cannot be specified " - // "together\n")); - // return (2); - // } +// /* +// * feature@ properties and version should not be specified +// * at the same time. +// */ +// // if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) && +// // nvlist_exists(proplist, vname)) || +// // (prop == ZPOOL_PROP_VERSION && +// // prop_list_contains_feature(proplist))) { +// // (void) fprintf(stderr, gettext("'feature@' and " +// // "'version' properties cannot be specified " +// // "together\n")); +// // return (2); +// // } - if (zpool_prop_feature(propname)) - normnm = propname; - else - normnm = zpool_prop_to_name(prop); - } else { - if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) { - normnm = zfs_prop_to_name(fprop); - } else { - normnm = propname; - } - } +// if (zpool_prop_feature(propname)) +// normnm = propname; +// else +// normnm = zpool_prop_to_name(prop); +// } else { +// if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) { +// normnm = zfs_prop_to_name(fprop); +// } else { +// normnm = propname; +// } +// } - if (nvlist_lookup_string(proplist, normnm, &strval) == 0 && - prop != ZPOOL_PROP_CACHEFILE) { - (void) snprintf(_lasterr_, 1024, "property '%s' " - "specified multiple times", propname); - return (2); - } +// if (nvlist_lookup_string(proplist, normnm, &strval) == 0 && +// prop != ZPOOL_PROP_CACHEFILE) { +// (void) snprintf(_lasterr_, 1024, "property '%s' " +// "specified multiple times", propname); +// return (2); +// } - if 
(nvlist_add_string(proplist, normnm, propval) != 0) { - (void) snprintf(_lasterr_, 1024, "internal " - "error: out of memory\n"); - return (1); - } +// if (nvlist_add_string(proplist, normnm, propval) != 0) { +// (void) snprintf(_lasterr_, 1024, "internal " +// "error: out of memory\n"); +// return (1); +// } - return (0); -} - -int nvlist_lookup_uint64_array_vds(nvlist_t *nv, const char *p, - vdev_stat_t **vds, uint_t *c) { - return nvlist_lookup_uint64_array(nv, p, (uint64_t**)vds, c); -} - -int nvlist_lookup_uint64_array_ps(nvlist_t *nv, const char *p, - pool_scan_stat_t **vds, uint_t *c) { - return nvlist_lookup_uint64_array(nv, p, (uint64_t**)vds, c); -} +// return (0); +// } nvlist_t** nvlist_alloc_array(int count) { return malloc(count*sizeof(nvlist_t*)); @@ -478,3 +366,100 @@ int refresh_stats(zpool_list_t *pool) } return 0; } + +const char *get_vdev_type(nvlist_ptr nv) { + char *value = NULL; + int r = nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &value); + if(r != 0) { + return NULL; + } + return value; +} + +const vdev_stat_ptr get_vdev_stats(nvlist_ptr nv) { + vdev_stat_ptr vs = NULL; + uint_t count; + int r = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, (uint64_t**)&vs, &count); + if(r != 0) { + return NULL; + } + return vs; +} + +pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv) { + pool_scan_stat_ptr vds = NULL; + uint_t c; + int r = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS, (uint64_t**)&vds, &c); + if(r != 0) { + return NULL; + } + return vds; +} + +vdev_children_ptr get_vdev_children(nvlist_t *nv) { + int r; + vdev_children_ptr children = malloc(sizeof(vdev_children_t)); + memset(children, 0, sizeof(vdev_children_t)); + r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &(children->first), &(children->count)); + if (r != 0) { + free(children); + return NULL; + } + return children; +} + +const char *get_vdev_path(nvlist_ptr nv) { + char *path = NULL; + uint64_t notpresent = 0; + int r = nvlist_lookup_uint64(nv, 
ZPOOL_CONFIG_NOT_PRESENT, ¬present); + if (r == 0 || notpresent != 0) { + if ( 0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) ) { + return NULL; + } + } + return path; +} + +uint64_t get_vdev_is_log(nvlist_ptr nv) { + uint64_t islog = B_FALSE; + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); + return islog; +} + + +// return +uint64_t get_zpool_state(nvlist_ptr nv) { + uint64_t state = 0; + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_POOL_STATE, &state); + return state; +} + +uint64_t get_zpool_guid(nvlist_ptr nv) { + uint64_t guid = 0; + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_POOL_GUID, &guid); + return guid; +} + +const char *get_zpool_name(nvlist_ptr nv) { + char *name = NULL; + if (0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_POOL_NAME, &name)) { + return NULL; + } + return name; +} + +const char *get_zpool_comment(nvlist_ptr nv) { + char *comment = NULL; + if (0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_COMMENT, &comment)) { + return NULL; + } + return comment; +} + +nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv) { + nvlist_ptr vdev_tree = NULL; + if ( 0 != nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) ) { + return NULL; + } + return vdev_tree; +} \ No newline at end of file diff --git a/zpool.go b/zpool.go index 940f8b0..d6aefda 100644 --- a/zpool.go +++ b/zpool.go @@ -2,6 +2,7 @@ package zfs // #include // #include +// #include "common.h" // #include "zpool.h" // #include "zfs.h" import "C" @@ -138,7 +139,7 @@ type Pool struct { func PoolOpen(name string) (pool Pool, err error) { csName := C.CString(name) defer C.free(unsafe.Pointer(csName)) - pool.list = C.zpool_list_open(libzfsHandle, csName) + pool.list = C.zpool_list_open(csName) if pool.list != nil { err = pool.ReloadProperties() @@ -150,12 +151,10 @@ func PoolOpen(name string) (pool Pool, err error) { func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { var dtype C.char_ptr - var c, children C.uint_t - var notpresent C.uint64_t var vs C.vdev_stat_ptr var 
ps C.pool_scan_stat_ptr - var child *C.nvlist_ptr - if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_TYPE, unsafe.Pointer(&dtype)) { + var children C.vdev_children_ptr + if dtype = C.get_vdev_type(nv); dtype == nil { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_TYPE) return } @@ -166,8 +165,7 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { } // Fetch vdev state - if 0 != C.nvlist_lookup_uint64_array_vds(nv, C.sZPOOL_CONFIG_VDEV_STATS, - unsafe.Pointer(&vs), &c) { + if vs = C.get_vdev_stats(nv); vs == nil { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_STATS) return } @@ -192,8 +190,7 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { vdevs.Stat.Fragmentation = uint64(vs.vs_fragmentation) // Fetch vdev scan stats - if 0 == C.nvlist_lookup_uint64_array_ps(nv, C.sZPOOL_CONFIG_SCAN_STATS, - unsafe.Pointer(&ps), &c) { + if ps = C.get_vdev_scan_stats(nv); ps != nil { vdevs.ScanStat.Func = uint64(ps.pss_func) vdevs.ScanStat.State = uint64(ps.pss_state) vdevs.ScanStat.StartTime = uint64(ps.pss_start_time) @@ -208,35 +205,28 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { } // Fetch the children - if C.nvlist_lookup_nvlist_array(nv, C.sZPOOL_CONFIG_CHILDREN, - unsafe.Pointer(&child), &children) != 0 { - children = 0 + children = C.get_vdev_children(nv) + if children != nil { + // this object that reference childrens and count should be deallocated from memory + defer C.free(unsafe.Pointer(children)) + vdevs.Devices = make([]VDevTree, 0, children.count) } - if children > 0 { - vdevs.Devices = make([]VDevTree, 0, children) - } - if C.nvlist_lookup_uint64(nv, C.sZPOOL_CONFIG_NOT_PRESENT, - ¬present) == 0 || notpresent != 0 { - var path C.char_ptr - if 0 != C.nvlist_lookup_string(nv, C.sZPOOL_CONFIG_PATH, unsafe.Pointer(&path)) { - err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_PATH) - return - } + path := C.get_vdev_path(nv) + if path != nil { 
vdevs.Path = C.GoString(path) } - for c = 0; c < children; c++ { + for c := C.uint_t(0); children != nil && c < children.count; c++ { var islog = C.uint64_t(C.B_FALSE) - C.nvlist_lookup_uint64(C.nvlist_array_at(child, c), - C.sZPOOL_CONFIG_IS_LOG, &islog) + islog = C.get_vdev_is_log(C.nvlist_array_at(children.first, c)) if islog != C.B_FALSE { continue } - vname := C.zpool_vdev_name(libzfsHandle, nil, C.nvlist_array_at(child, c), + vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(children.first, c), C.B_TRUE) var vdev VDevTree vdev, err = poolGetConfig(C.GoString(vname), - C.nvlist_array_at(child, c)) + C.nvlist_array_at(children.first, c)) C.free(unsafe.Pointer(vname)) if err != nil { return @@ -251,7 +241,6 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { var config, nvroot C.nvlist_ptr var cname, msgid, comment C.char_ptr - var poolState, guid C.uint64_t var reason C.zpool_status_t var errata C.zpool_errata_t config = nil @@ -265,41 +254,35 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { C.strings_setat(cpaths, C.int(i), csPath) } - pools := C.zpool_find_import(libzfsHandle, C.int(numofp), cpaths) + pools := C.zpool_find_import(C.libzfsHandle, C.int(numofp), cpaths) defer C.nvlist_free(pools) elem = C.nvlist_next_nvpair(pools, elem) epools = make([]ExportedPool, 0, 1) for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { ep := ExportedPool{} - if C.nvpair_value_nvlist(elem, unsafe.Pointer(&config)) != 0 { + if C.nvpair_value_nvlist(elem, (**C.struct_nvlist)(&config)) != 0 { err = LastError() return } - if C.nvlist_lookup_uint64(config, C.sZPOOL_CONFIG_POOL_STATE, - &poolState) != 0 { - err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_STATE) - return - } - ep.State = PoolState(poolState) - if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_POOL_NAME, unsafe.Pointer(&cname)) != 0 { + 
+ ep.State = PoolState(C.get_zpool_state(config)) + + if cname = C.get_zpool_name(config); cname == nil { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) return } ep.Name = C.GoString(cname) - if C.nvlist_lookup_uint64(config, C.sZPOOL_CONFIG_POOL_GUID, &guid) != 0 { - err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_GUID) - return - } - ep.GUID = uint64(guid) - reason = C.zpool_import_status(config, &msgid, &errata) + + ep.GUID = uint64(C.get_zpool_guid(config)) + + reason = C.zpool_import_status(config, (**C.char)(&msgid), &errata) ep.Status = PoolStatus(reason) - if C.nvlist_lookup_string(config, C.sZPOOL_CONFIG_COMMENT, unsafe.Pointer(&comment)) == 0 { + if comment = C.get_zpool_comment(config); comment != nil { ep.Comment = C.GoString(comment) } - if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, - unsafe.Pointer(&nvroot)) != 0 { + if nvroot = C.get_zpool_vdev_tree(config); nvroot == nil { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) return } @@ -325,33 +308,26 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, C.strings_setat(cpaths, C.int(i), csPath) } - pools := C.zpool_find_import(libzfsHandle, C.int(numofp), cpaths) + pools := C.zpool_find_import(C.libzfsHandle, C.int(numofp), cpaths) defer C.nvlist_free(pools) elem = C.nvlist_next_nvpair(pools, elem) for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { var cq *C.char var tconfig *C.nvlist_t - retcode := C.nvpair_value_nvlist(elem, unsafe.Pointer(&tconfig)) + retcode := C.nvpair_value_nvlist(elem, (**C.struct_nvlist)(&tconfig)) if retcode != 0 { err = errPoolList return } if guid { - var iguid C.uint64_t - if retcode = C.nvlist_lookup_uint64(tconfig, - C.sZPOOL_CONFIG_POOL_GUID, &iguid); retcode != 0 { - err = errPoolList - return - } - sguid := fmt.Sprint(iguid) + sguid := fmt.Sprint(C.get_zpool_guid(tconfig)) if q == sguid { config = tconfig break } } else { - if retcode = C.nvlist_lookup_string(tconfig, - 
C.sZPOOL_CONFIG_POOL_NAME, unsafe.Pointer(&cq)); retcode != 0 { + if cq = C.get_zpool_name(tconfig); cq == nil { err = errPoolList return } @@ -369,14 +345,13 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, } if guid { // We need to get name so we can open pool by name - if retcode := C.nvlist_lookup_string(config, - C.sZPOOL_CONFIG_POOL_NAME, unsafe.Pointer(&cname)); retcode != 0 { + if cname = C.get_zpool_name(config); cname == nil { err = errPoolList return } name = C.GoString(cname) } - if retcode := C.zpool_import(libzfsHandle, config, cname, + if retcode := C.zpool_import(C.libzfsHandle, config, cname, nil); retcode != 0 { err = LastError() return @@ -416,17 +391,19 @@ func PoolImportByGUID(guid string, searchpaths []string) (pool Pool, err error) // anymore. Call Pool.Close() method. func PoolOpenAll() (pools []Pool, err error) { var pool Pool - errcode := C.zpool_list(libzfsHandle, unsafe.Pointer(&pool.list)) + if pool.list = C.zpool_list_openall(); pool.list == nil { + err = LastError() + return + } for pool.list != nil { err = pool.ReloadProperties() if err != nil { return } + next := C.zpool_next(pool.list) + pool.list.pnext = nil pools = append(pools, pool) - pool.list = C.zpool_next(pool.list) - } - if errcode != 0 { - err = LastError() + pool.list = next } return } @@ -469,7 +446,7 @@ func (pool *Pool) RefreshStats() (err error) { // ReloadProperties re-read ZFS pool properties and features, refresh // Pool.Properties and Pool.Features map func (pool *Pool) ReloadProperties() (err error) { - propList := C.read_zpool_properties(pool.list.zph) + propList := C.read_zpool_properties(pool.list) if propList == nil { err = LastError() return @@ -515,11 +492,12 @@ func (pool *Pool) GetProperty(p Prop) (prop Property, err error) { PoolPropertyToName(p))) return } - var list C.property_list_t - r := C.read_zpool_property(pool.list.zph, unsafe.Pointer(&list), C.int(p)) - if r != 0 { + list := C.read_zpool_property(pool.list, 
C.int(p)) + if list == nil { err = LastError() + return } + defer C.free_properties(list) prop.Value = C.GoString(&(list.value[0])) prop.Source = C.GoString(&(list.source[0])) pool.Properties[p] = prop @@ -576,8 +554,10 @@ func (pool *Pool) SetProperty(p Prop, value string) (err error) { // Close ZFS pool handler and release associated memory. // Do not use Pool object after this. func (pool *Pool) Close() { - C.zpool_list_close(pool.list) - pool.list = nil + if pool.list != nil { + C.zpool_list_close(pool.list) + pool.list = nil + } } // Name get (re-read) ZFS pool name property @@ -633,11 +613,11 @@ func (vdev *VDevTree) isLog() (r C.uint64_t) { } func toCPoolProperties(props PoolProperties) (cprops C.nvlist_ptr) { - cprops = nil + cprops = C.new_property_nvlist() for prop, value := range props { name := C.zpool_prop_to_name(C.zpool_prop_t(prop)) csPropValue := C.CString(value) - r := C.add_prop_list(name, csPropValue, unsafe.Pointer(&cprops), C.boolean_t(1)) + r := C.property_nvlist_add(cprops, name, csPropValue) C.free(unsafe.Pointer(csPropValue)) if r != 0 { if cprops != nil { @@ -651,11 +631,11 @@ func toCPoolProperties(props PoolProperties) (cprops C.nvlist_ptr) { } func toCDatasetProperties(props DatasetProperties) (cprops C.nvlist_ptr) { - cprops = nil + cprops = C.new_property_nvlist() for prop, value := range props { name := C.zfs_prop_to_name(C.zfs_prop_t(prop)) csPropValue := C.CString(value) - r := C.add_prop_list(name, csPropValue, unsafe.Pointer(&cprops), C.boolean_t(0)) + r := C.property_nvlist_add(cprops, name, csPropValue) C.free(unsafe.Pointer(csPropValue)) if r != 0 { if cprops != nil { @@ -696,9 +676,9 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, defer C.nvlist_free_array(l2cache) for i, vdev := range vdevs { grouping, mindevs, maxdevs := vdev.isGrouping() - var child C.nvlist_ptr + var child *C.struct_nvlist // fmt.Println(vdev.Type) - if r := C.nvlist_alloc(unsafe.Pointer(&child), C.NV_UNIQUE_NAME, 0); r != 0 { + 
if r := C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 { err = errors.New("Failed to allocate vdev") return } @@ -812,8 +792,8 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, func PoolCreate(name string, vdevs []VDevTree, features map[string]string, props PoolProperties, fsprops DatasetProperties) (pool Pool, err error) { // create root vdev nvroot - var nvroot C.nvlist_ptr - if r := C.nvlist_alloc(unsafe.Pointer(&nvroot), C.NV_UNIQUE_NAME, 0); r != 0 { + var nvroot *C.struct_nvlist + if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 { err = errors.New("Failed to allocate root vdev") return } @@ -860,8 +840,7 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, for fname, fval := range features { csName := C.CString(fmt.Sprintf("feature@%s", fname)) csVal := C.CString(fval) - r := C.add_prop_list(csName, csVal, unsafe.Pointer(&cprops), - C.boolean_t(1)) + r := C.property_nvlist_add(cprops, csName, csVal) C.free(unsafe.Pointer(csName)) C.free(unsafe.Pointer(csVal)) if r != 0 { @@ -876,7 +855,7 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, // Create actual pool then open csName := C.CString(name) defer C.free(unsafe.Pointer(csName)) - if r := C.zpool_create(libzfsHandle, csName, nvroot, + if r := C.zpool_create(C.libzfsHandle, csName, nvroot, cprops, cfsprops); r != 0 { err = LastError() err = errors.New(err.Error() + " (zpool_create)") @@ -890,14 +869,14 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, // Status get pool status. Let you check if pool healthy. 
func (pool *Pool) Status() (status PoolStatus, err error) { - var msgid C.char_ptr + var msgid *C.char var reason C.zpool_status_t var errata C.zpool_errata_t if pool.list == nil { err = errors.New(msgPoolIsNil) return } - reason = C.zpool_get_status(pool.list.zph, unsafe.Pointer(&msgid), &errata) + reason = C.zpool_get_status(pool.list.zph, &msgid, &errata) status = PoolStatus(reason) return } @@ -948,15 +927,14 @@ func (pool *Pool) ExportForce(log string) (err error) { // VDevTree - Fetch pool's current vdev tree configuration, state and stats func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { - var nvroot C.nvlist_ptr + var nvroot *C.struct_nvlist var poolName string config := C.zpool_get_config(pool.list.zph, nil) if config == nil { err = fmt.Errorf("Failed zpool_get_config") return } - if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, - unsafe.Pointer(&nvroot)) != 0 { + if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0 { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) return } diff --git a/zpool.h b/zpool.h index 3aaf3c8..b86b9bd 100644 --- a/zpool.h +++ b/zpool.h @@ -5,72 +5,68 @@ #ifndef SERVERWARE_ZPOOL_H #define SERVERWARE_ZPOOL_H -#define INT_MAX_NAME 256 -#define INT_MAX_VALUE 1024 -#define ZAP_OLDMAXVALUELEN 1024 -#define ZFS_MAX_DATASET_NAME_LEN 256 - struct zpool_list { zpool_handle_t *zph; void *pnext; }; -typedef struct property_list { - char value[INT_MAX_VALUE]; - char source[ZFS_MAX_DATASET_NAME_LEN]; - int property; - void *pnext; -} property_list_t; +struct vdev_children { + nvlist_t **first; + uint_t count; +}; typedef struct zpool_list zpool_list_t; typedef struct zpool_list* zpool_list_ptr; -typedef struct libzfs_handle* libzfs_handle_ptr; -typedef struct nvlist* nvlist_ptr; -typedef struct property_list *property_list_ptr; +typedef struct vdev_children vdev_children_t; +typedef struct vdev_children* vdev_children_ptr; + typedef struct pool_scan_stat* pool_scan_stat_ptr; -typedef 
struct nvpair* nvpair_ptr; - -typedef struct vdev_stat* vdev_stat_ptr; -typedef char* char_ptr; - -property_list_t *new_property_list(); zpool_list_t *create_zpool_list_item(); void zprop_source_tostr(char *dst, zprop_source_t source); -zpool_list_t* zpool_list_open(libzfs_handle_t *libzfs, const char *name); -int zpool_list(libzfs_handle_t *libzfs, zpool_list_t **first); +zpool_list_t* zpool_list_open(const char *name); +zpool_list_ptr zpool_list_openall(); zpool_list_t *zpool_next(zpool_list_t *pool); +void zpool_list_free(zpool_list_t *list); void zpool_list_close(zpool_list_t *pool); -int read_zpool_property(zpool_handle_t *zh, property_list_t *list, int prop); -property_list_t *read_zpool_properties(zpool_handle_t *zh); +property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop); +property_list_t *read_zpool_properties(zpool_list_ptr pool); property_list_t *next_property(property_list_t *list); -void free_properties(property_list_t *root); pool_state_t zpool_read_state(zpool_handle_t *zh); const char *lasterr(void); -int -add_prop_list(const char *propname, char *propval, nvlist_t **props, - boolean_t poolprop); +// int +// add_prop_list(const char *propname, char *propval, nvlist_t **props, +// boolean_t poolprop); nvlist_t** nvlist_alloc_array(int count); void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item); void nvlist_free_array(nvlist_t **a); nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i); -int nvlist_lookup_uint64_array_vds(nvlist_t *nv, const char *p, - vdev_stat_t **vds, uint_t *c); - -int nvlist_lookup_uint64_array_ps(nvlist_t *nv, const char *p, - pool_scan_stat_t **vds, uint_t *c); - int refresh_stats(zpool_list_t *pool); +const char *get_vdev_type(nvlist_ptr nv); +const vdev_stat_ptr get_vdev_stats(nvlist_ptr nv); +pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv); +vdev_children_ptr get_vdev_children(nvlist_t *nv); +const char *get_vdev_path(nvlist_ptr nv); +uint64_t get_vdev_is_log(nvlist_ptr nv); + +uint64_t 
get_zpool_state(nvlist_ptr nv); +uint64_t get_zpool_guid(nvlist_ptr nv); +const char *get_zpool_name(nvlist_ptr nv); +const char *get_zpool_comment(nvlist_ptr nv); + +nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv); + + char *sZPOOL_CONFIG_VERSION; char *sZPOOL_CONFIG_POOL_NAME; char *sZPOOL_CONFIG_POOL_STATE; diff --git a/zpool_test.go b/zpool_test.go index c55382c..e257d30 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -159,7 +159,7 @@ func zpoolTestPoolDestroy(t *testing.T) { return } defer p.Close() - if err = p.Destroy("Test of pool destroy (" + TSTPoolName + ")"); err != nil { + if err = p.Destroy(TSTPoolName); err != nil { t.Error(err.Error()) return } From 1b47551b8717193bb27f9f49501de5fb34c8064c Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 15 Jun 2017 14:12:39 +0200 Subject: [PATCH 27/36] - ZFS send/receive --- common.go | 5 + sendrecv.go | 269 ++++++++++++++++++++++++++++++++++++++++++++++++++++ zfs.c | 29 +++++- zfs.go | 2 +- zfs.h | 108 +++++++++++++++++++++ 5 files changed, 409 insertions(+), 4 deletions(-) create mode 100644 sendrecv.go diff --git a/common.go b/common.go index 0b0ab88..b7949d0 100644 --- a/common.go +++ b/common.go @@ -22,6 +22,7 @@ import "C" import ( "errors" + "sync" ) // VDevType type of device in the pool @@ -68,6 +69,10 @@ type Property struct { Source string } +var Global struct { + Mtx sync.Mutex +} + // Pool status const ( /* diff --git a/sendrecv.go b/sendrecv.go new file mode 100644 index 0000000..d78eb5b --- /dev/null +++ b/sendrecv.go @@ -0,0 +1,269 @@ +package zfs + +// #include +// #include +// #include "common.h" +// #include "zpool.h" +// #include "zfs.h" +import "C" +import ( + "fmt" + "os" + "path" + "strings" + "syscall" + "unsafe" +) + +type SendFlags struct { + Verbose bool + Replicate bool + DoAll bool + FromOrigin bool + Dedup bool + Props bool + DryRun bool + // Parsable bool + // Progress bool + LargeBlock bool + EmbedData bool + // Compress bool +} + +type RecvFlags struct { + Verbose bool + 
IsPrefix bool + IsTail bool + DryRun bool + Force bool + CanmountOff bool + Resumable bool + ByteSwap bool + NoMount bool +} + +func to_boolean_t(a bool) C.boolean_t { + if a { + return 1 + } + return 0 +} + +func to_sendflags_t(flags *SendFlags) (cflags *C.sendflags_t) { + cflags = C.alloc_sendflags() + cflags.verbose = to_boolean_t(flags.Verbose) + cflags.replicate = to_boolean_t(flags.Replicate) + cflags.doall = to_boolean_t(flags.DoAll) + cflags.fromorigin = to_boolean_t(flags.FromOrigin) + cflags.dedup = to_boolean_t(flags.Dedup) + cflags.props = to_boolean_t(flags.Props) + cflags.dryrun = to_boolean_t(flags.DryRun) + // cflags.parsable = to_boolean_t(flags.Parsable) + // cflags.progress = to_boolean_t(flags.Progress) + cflags.largeblock = to_boolean_t(flags.LargeBlock) + cflags.embed_data = to_boolean_t(flags.EmbedData) + // cflags.compress = to_boolean_t(flags.Compress) + return +} + +func to_recvflags_t(flags *RecvFlags) (cflags *C.recvflags_t) { + cflags = C.alloc_recvflags() + cflags.verbose = to_boolean_t(flags.Verbose) + cflags.isprefix = to_boolean_t(flags.IsPrefix) + cflags.istail = to_boolean_t(flags.IsTail) + cflags.dryrun = to_boolean_t(flags.DryRun) + cflags.force = to_boolean_t(flags.Force) + cflags.canmountoff = to_boolean_t(flags.CanmountOff) + // cflags.resumable = to_boolean_t(flags.Resumable) + cflags.byteswap = to_boolean_t(flags.ByteSwap) + cflags.nomount = to_boolean_t(flags.NoMount) + return +} + +func (d *Dataset) send(FromName string, outf *os.File, flags *SendFlags) (err error) { + var cfromname, ctoname *C.char + var dpath string + var pd Dataset + + if d.Type != DatasetTypeSnapshot || (len(FromName) > 0 && strings.Contains(FromName, "#")) { + err = fmt.Errorf( + "Unsupported method on filesystem or bookmark. 
Use func SendOne() for that purpose.") + return + } + + cflags := to_sendflags_t(flags) + defer C.free(unsafe.Pointer(cflags)) + if dpath, err = d.Path(); err != nil { + return + } + if len(FromName) > 0 { + if FromName[0] == '#' || FromName[0] == '@' { + FromName = dpath + FromName + } + cfromname = C.CString(FromName) + defer C.free(unsafe.Pointer(cfromname)) + } + sendparams := strings.Split(dpath, "@") + parent := sendparams[0] + ctoname = C.CString(sendparams[1]) + defer C.free(unsafe.Pointer(ctoname)) + if pd, err = DatasetOpen(parent); err != nil { + return + } + defer pd.Close() + cerr := C.zfs_send(pd.list.zh, cfromname, ctoname, cflags, C.int(outf.Fd()), nil, nil, nil) + if cerr != 0 { + err = LastError() + } + return +} + +func (d *Dataset) SendOne(FromName string, outf *os.File, flags *SendFlags) (err error) { + var cfromname, ctoname *C.char + var dpath string + var lzc_send_flags uint32 + + if d.Type == DatasetTypeSnapshot || (len(FromName) > 0 && !strings.Contains(FromName, "#")) { + err = fmt.Errorf( + "Unsupported with snapshot. 
Use func Send() for that purpose.") + return + } + if flags.Replicate || flags.DoAll || flags.Props || flags.Dedup || flags.DryRun { + err = fmt.Errorf("Unsupported flag with filesystem or bookmark.") + return + } + + if flags.LargeBlock { + lzc_send_flags |= C.LZC_SEND_FLAG_LARGE_BLOCK + } + if flags.EmbedData { + lzc_send_flags |= C.LZC_SEND_FLAG_EMBED_DATA + } + // if (flags.Compress) + // lzc_send_flags |= LZC_SEND_FLAG_COMPRESS; + if dpath, err = d.Path(); err != nil { + return + } + if len(FromName) > 0 { + if FromName[0] == '#' || FromName[0] == '@' { + FromName = dpath + FromName + } + cfromname = C.CString(FromName) + defer C.free(unsafe.Pointer(cfromname)) + } + ctoname = C.CString(path.Base(dpath)) + defer C.free(unsafe.Pointer(ctoname)) + cerr := C.zfs_send_one(d.list.zh, cfromname, C.int(outf.Fd()), lzc_send_flags) + if cerr != 0 { + err = LastError() + } + return +} + +func (d *Dataset) Send(outf *os.File, flags SendFlags) (err error) { + if flags.Replicate { + flags.DoAll = true + } + err = d.send("", outf, &flags) + return +} + +func (d *Dataset) SendFrom(FromName string, outf *os.File, flags SendFlags) (err error) { + var porigin Property + var from, dest []string + if err = d.ReloadProperties(); err != nil { + return + } + porigin, _ = d.GetProperty(DatasetPropOrigin) + if len(porigin.Value) > 0 && porigin.Value == FromName { + FromName = "" + flags.FromOrigin = true + } else { + var dpath string + if dpath, err = d.Path(); err != nil { + return + } + dest = strings.Split(dpath, "@") + from = strings.Split(FromName, "@") + + if len(from[0]) > 0 && from[0] != dest[0] { + err = fmt.Errorf("Incremental source must be in same filesystem.") + return + } + if len(from) < 2 || strings.Contains(from[1], "@") || strings.Contains(from[1], "/") { + err = fmt.Errorf("Invalid incremental source.") + return + } + } + err = d.send(from[1], outf, &flags) + return +} + +func (d *Dataset) SendSize(FromName string, flags SendFlags) (size uint64, err error) { + var 
porigin Property + var from Dataset + var dpath string + if dpath, err = d.Path(); err != nil { + return + } + zc := C.new_zfs_cmd() + defer C.free(unsafe.Pointer(zc)) + dpath = strings.Split(dpath, "@")[0] + if len(FromName) > 0 { + + if FromName[0] == '#' || FromName[0] == '@' { + FromName = dpath + FromName + } + porigin, _ = d.GetProperty(DatasetPropOrigin) + if len(porigin.Value) > 0 && porigin.Value == FromName { + FromName = "" + flags.FromOrigin = true + } + if from, err = DatasetOpen(FromName); err != nil { + return + } + zc.zc_fromobj = C.zfs_prop_get_int(from.list.zh, C.ZFS_PROP_OBJSETID) + from.Close() + } else { + zc.zc_fromobj = 0 + } + zc.zc_obj = C.uint64_t(to_boolean_t(flags.FromOrigin)) + zc.zc_sendobj = C.zfs_prop_get_int(d.list.zh, C.ZFS_PROP_OBJSETID) + zc.zc_guid = 1 + zc.zc_flags = 0 + if flags.LargeBlock { + zc.zc_flags |= C.LZC_SEND_FLAG_LARGE_BLOCK + } + if flags.EmbedData { + zc.zc_flags |= C.LZC_SEND_FLAG_EMBED_DATA + } + + // C.estimate_ioctl(d.list.zhp, prevsnap_obj, to_boolean_t(flags.FromOrigin), lzc_send_flags, unsafe.Pointer(&size)) + if ec, e := C.estimate_send_size(zc); ec != 0 { + err = fmt.Errorf("Failed to estimate send size. %s %d", e.Error(), e.(syscall.Errno)) + } + size = uint64(zc.zc_objset_type) + return +} + +func (d *Dataset) Receive(name string, inf *os.File, flags RecvFlags) (err error) { + var dpath string + if dpath, err = d.Path(); err != nil { + return + } + props := C.new_property_nvlist() + if props == nil { + err = fmt.Errorf("Out of memory func (d *Dataset) Recv()") + return + } + defer C.nvlist_free(props) + cflags := to_recvflags_t(&flags) + defer C.free(unsafe.Pointer(cflags)) + dest := C.CString(dpath + "/" + name) + defer C.free(unsafe.Pointer(dest)) + ec := C.zfs_receive(C.libzfsHandle, dest, cflags, C.int(inf.Fd()), nil) + if ec != 0 { + err = fmt.Errorf("ZFS receive of %s failed. 
%s", C.GoString(dest), LastError().Error()) + } + return +} diff --git a/zfs.c b/zfs.c index a54affc..96bd849 100644 --- a/zfs.c +++ b/zfs.c @@ -160,16 +160,16 @@ property_list_t *read_dataset_property(dataset_list_t *dataset, int prop) { int r = 0; zprop_source_t source; char statbuf[INT_MAX_VALUE]; - property_list_ptr list; + property_list_ptr list = NULL; list = new_property_list(); r = zfs_prop_get(dataset->zh, prop, list->value, INT_MAX_VALUE, &source, statbuf, INT_MAX_VALUE, 1); - if (r == 0) { + if (r == 0 && list != NULL) { // strcpy(list->name, zpool_prop_to_name(prop)); zprop_source_tostr(list->source, source); list->property = (int)prop; - } else { + } else if (list != NULL) { free_properties(list); list = NULL; } @@ -226,3 +226,26 @@ char** alloc_cstrings(int size) { void strings_setat(char **a, int at, char *v) { a[at] = v; } + + +sendflags_t *alloc_sendflags() { + sendflags_t *r = malloc(sizeof(sendflags_t)); + memset(r, 0, sizeof(sendflags_t)); + return r; +} +recvflags_t *alloc_recvflags() { + recvflags_t *r = malloc(sizeof(recvflags_t)); + memset(r, 0, sizeof(recvflags_t)); + return r; +} + +struct zfs_cmd *new_zfs_cmd(){ + struct zfs_cmd *cmd = malloc(sizeof(struct zfs_cmd)); + memset(cmd, 0, sizeof(struct zfs_cmd)); + return cmd; +} + +int estimate_send_size(struct zfs_cmd *zc) { + return zfs_ioctl(libzfsHandle, ZFS_IOC_SEND, zc); +} + diff --git a/zfs.go b/zfs.go index e4ec4f3..18caade 100644 --- a/zfs.go +++ b/zfs.go @@ -107,7 +107,7 @@ func DatasetOpen(path string) (d Dataset, err error) { if err == nil { err = fmt.Errorf("dataset not found.") } - println("open failed") + err = fmt.Errorf("%s - %s", err.Error(), path) return } d.Type = DatasetType(C.dataset_type(d.list)) diff --git a/zfs.h b/zfs.h index d70566e..3a43c82 100644 --- a/zfs.h +++ b/zfs.h @@ -10,6 +10,107 @@ struct dataset_list { void *pnext; }; +typedef struct zfs_share { + uint64_t z_exportdata; + uint64_t z_sharedata; + uint64_t z_sharetype; /* 0 = share, 1 = unshare */ + 
uint64_t z_sharemax; /* max length of share string */ +} zfs_share_t; + +struct drr_begin { + uint64_t drr_magic; + uint64_t drr_versioninfo; /* was drr_version */ + uint64_t drr_creation_time; + dmu_objset_type_t drr_type; + uint32_t drr_flags; + uint64_t drr_toguid; + uint64_t drr_fromguid; + char drr_toname[MAXNAMELEN]; +} drr_begin; + +/* + * A limited number of zpl level stats are retrievable + * with an ioctl. zfs diff is the current consumer. + */ +typedef struct zfs_stat { + uint64_t zs_gen; + uint64_t zs_mode; + uint64_t zs_links; + uint64_t zs_ctime[2]; +} zfs_stat_t; + +typedef struct zinject_record { + uint64_t zi_objset; + uint64_t zi_object; + uint64_t zi_start; + uint64_t zi_end; + uint64_t zi_guid; + uint32_t zi_level; + uint32_t zi_error; + uint64_t zi_type; + uint32_t zi_freq; + uint32_t zi_failfast; + char zi_func[MAXNAMELEN]; + uint32_t zi_iotype; + int32_t zi_duration; + uint64_t zi_timer; + uint64_t zi_nlanes; + uint32_t zi_cmd; + uint32_t zi_pad; +} zinject_record_t; + +typedef struct dmu_objset_stats { + uint64_t dds_num_clones; /* number of clones of this */ + uint64_t dds_creation_txg; + uint64_t dds_guid; + dmu_objset_type_t dds_type; + uint8_t dds_is_snapshot; + uint8_t dds_inconsistent; + char dds_origin[ZFS_MAX_DATASET_NAME_LEN]; +} dmu_objset_stats_t; + +typedef struct zfs_cmd { + char zc_name[MAXPATHLEN]; /* name of pool or dataset */ + uint64_t zc_nvlist_src; /* really (char *) */ + uint64_t zc_nvlist_src_size; + uint64_t zc_nvlist_dst; /* really (char *) */ + uint64_t zc_nvlist_dst_size; + boolean_t zc_nvlist_dst_filled; /* put an nvlist in dst? */ + int zc_pad2; + + /* + * The following members are for legacy ioctls which haven't been + * converted to the new method. 
+ */ + uint64_t zc_history; /* really (char *) */ + char zc_value[MAXPATHLEN * 2]; + char zc_string[MAXNAMELEN]; + uint64_t zc_guid; + uint64_t zc_nvlist_conf; /* really (char *) */ + uint64_t zc_nvlist_conf_size; + uint64_t zc_cookie; + uint64_t zc_objset_type; + uint64_t zc_perm_action; + uint64_t zc_history_len; + uint64_t zc_history_offset; + uint64_t zc_obj; + uint64_t zc_iflags; /* internal to zfs(7fs) */ + zfs_share_t zc_share; + dmu_objset_stats_t zc_objset_stats; + struct drr_begin zc_begin_record; + zinject_record_t zc_inject_record; + uint32_t zc_defer_destroy; + uint32_t zc_flags; + uint64_t zc_action_handle; + int zc_cleanup_fd; + uint8_t zc_simple; + uint8_t zc_pad[3]; /* alignment */ + uint64_t zc_sendobj; + uint64_t zc_fromobj; + uint64_t zc_createtxg; + zfs_stat_t zc_stat; +} zfs_cmd_t; + typedef struct dataset_list dataset_list_t; typedef struct dataset_list* dataset_list_ptr; @@ -46,5 +147,12 @@ property_list_t *read_user_property(dataset_list_t *dataset, const char* prop); char** alloc_cstrings(int size); void strings_setat(char **a, int at, char *v); +sendflags_t *alloc_sendflags(); +recvflags_t *alloc_recvflags(); + + +struct zfs_cmd *new_zfs_cmd(); +int estimate_send_size(struct zfs_cmd *zc); + #endif /* SERVERWARE_ZFS_H */ From 307acf899a3ee2cb776fa3afcb2f54e6c4a53a58 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Fri, 4 Aug 2017 13:12:12 +0200 Subject: [PATCH 28/36] - Fix bug, zpool export failing --- zpool.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/zpool.go b/zpool.go index d6aefda..85307dc 100644 --- a/zpool.go +++ b/zpool.go @@ -909,6 +909,10 @@ func (pool *Pool) Export(force bool, log string) (err error) { } csLog := C.CString(log) defer C.free(unsafe.Pointer(csLog)) + if rc := C.zpool_disable_datasets(pool.list.zph, forcet); rc != 0 { + err = LastError() + return + } if rc := C.zpool_export(pool.list.zph, forcet, csLog); rc != 0 { err = LastError() } From a7726113d41e863772580d62ba116c635183f9c9 Mon Sep 17 00:00:00 
2001 From: Faruk Kasumovic Date: Fri, 4 Aug 2017 13:12:41 +0200 Subject: [PATCH 29/36] - Bug fixes for send recv, and mount --- sendrecv.go | 4 +-- zfs.c | 22 ++++++++++----- zfs.go | 78 ++++++++++++++++++++++++++++++++++++++++------------- 3 files changed, 77 insertions(+), 27 deletions(-) diff --git a/sendrecv.go b/sendrecv.go index d78eb5b..89937de 100644 --- a/sendrecv.go +++ b/sendrecv.go @@ -246,7 +246,7 @@ func (d *Dataset) SendSize(FromName string, flags SendFlags) (size uint64, err e return } -func (d *Dataset) Receive(name string, inf *os.File, flags RecvFlags) (err error) { +func (d *Dataset) Receive(inf *os.File, flags RecvFlags) (err error) { var dpath string if dpath, err = d.Path(); err != nil { return @@ -259,7 +259,7 @@ func (d *Dataset) Receive(name string, inf *os.File, flags RecvFlags) (err error defer C.nvlist_free(props) cflags := to_recvflags_t(&flags) defer C.free(unsafe.Pointer(cflags)) - dest := C.CString(dpath + "/" + name) + dest := C.CString(dpath) defer C.free(unsafe.Pointer(dest)) ec := C.zfs_receive(C.libzfsHandle, dest, cflags, C.int(inf.Fd()), nil) if ec != 0 { diff --git a/zfs.c b/zfs.c index 96bd849..09ba095 100644 --- a/zfs.c +++ b/zfs.c @@ -19,8 +19,13 @@ dataset_list_t *create_dataset_list_item() { } void dataset_list_close(dataset_list_t *list) { - zfs_close(list->zh); - free(list); + if (list != NULL) { + if (list->zh != NULL) { + zfs_close(list->zh); + list->zh = NULL; + } + free(list); + } // dataset_list_free(list); } @@ -132,15 +137,20 @@ int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t rec } const char *dataset_is_mounted(dataset_list_ptr dataset){ - char *mp; + char *mp = NULL; + // zfs_is_mounted returns B_TRUE or B_FALSE if (0 != zfs_is_mounted(dataset->zh, &mp)) { - return NULL; + return mp; } - return mp; + return NULL; } int dataset_mount(dataset_list_ptr dataset, const char *options, int flags) { - return zfs_mount(dataset->zh, options, flags); + if ( 0 < strlen(options)) { + return 
zfs_mount(dataset->zh, options, flags); + } else { + return zfs_mount(dataset->zh, NULL, flags); + } } int dataset_unmount(dataset_list_ptr dataset, int flags) { diff --git a/zfs.go b/zfs.go index 18caade..b76b0d8 100644 --- a/zfs.go +++ b/zfs.go @@ -10,6 +10,7 @@ import "C" import ( "errors" "fmt" + "strings" "unsafe" ) @@ -114,7 +115,6 @@ func DatasetOpen(path string) (d Dataset, err error) { d.Properties = make(map[Prop]Property) err = d.ReloadProperties() if err != nil { - println("reload properties failed") return } err = d.openChildren() @@ -165,10 +165,9 @@ func DatasetCreate(path string, dtype DatasetType, // Close close dataset and all its recursive children datasets (close handle // and cleanup dataset object/s from memory) func (d *Dataset) Close() { - if d.list != nil && d.list.zh != nil { - C.dataset_list_close(d.list) - d.list = nil - } + // path, _ := d.Path() + C.dataset_list_close(d.list) + d.list = nil for _, cd := range d.Children { cd.Close() } @@ -204,18 +203,51 @@ func (d *Dataset) Destroy(Defer bool) (err error) { // DestroyRecursive recursively destroy children of dataset and dataset. 
func (d *Dataset) DestroyRecursive() (err error) { - if len(d.Children) > 0 { - for _, c := range d.Children { - if err = c.DestroyRecursive(); err != nil { - return - } - // close handle to destroyed child dataset - c.Close() - } - // clear closed children array - d.Children = make([]Dataset, 0) + var path string + if path, err = d.Path(); err != nil { + return + } + if !strings.Contains(path, "@") { // not snapshot + if len(d.Children) > 0 { + for _, c := range d.Children { + if err = c.DestroyRecursive(); err != nil { + return + } + // close handle to destroyed child dataset + c.Close() + } + // clear closed children array + d.Children = make([]Dataset, 0) + } + err = d.Destroy(false) + } else { + var parent Dataset + tmp := strings.Split(path, "@") + ppath, snapname := tmp[0], tmp[1] + if parent, err = DatasetOpen(ppath); err != nil { + return + } + defer parent.Close() + if len(parent.Children) > 0 { + for _, c := range parent.Children { + if path, err = c.Path(); err != nil { + return + } + if strings.Contains(path, "@") { + continue // skip other snapshots + } + if c, err = DatasetOpen(path + "@" + snapname); err != nil { + continue + } + if err = c.DestroyRecursive(); err != nil { + c.Close() + return + } + c.Close() + } + } + err = d.Destroy(false) } - err = d.Destroy(false) return } @@ -241,6 +273,8 @@ func (d *Dataset) ReloadProperties() (err error) { return } d.Properties = make(map[Prop]Property) + Global.Mtx.Lock() + defer Global.Mtx.Unlock() for prop := DatasetPropType; prop < DatasetNumProps; prop++ { plist := C.read_dataset_property(d.list, C.int(prop)) if plist == nil { @@ -434,6 +468,7 @@ func (d *Dataset) IsMounted() (mounted bool, where string) { // defer C.free(mp) if mounted = (mp != nil); mounted { where = C.GoString(mp) + C.free(unsafe.Pointer(mp)) } return } @@ -471,10 +506,15 @@ func (d *Dataset) UnmountAll(flags int) (err error) { err = errors.New(msgDatasetIsNil) return } - if ec := C.dataset_unmountall(d.list, C.int(flags)); ec != 0 { 
- err = LastError() + // This is implemented recursive because zfs_unmountall() didn't work + if len(d.Children) > 0 { + for _, c := range d.Children { + if err = c.UnmountAll(flags); err != nil { + return + } + } } - return + return d.Unmount(flags) } // DatasetPropertyToName convert property to name From 006e8a798a35d9387ff0cc3318e963c3ec5be683 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 7 Dec 2017 09:54:07 +0100 Subject: [PATCH 30/36] - Small fixes --- zfs.h | 12 ----- zpool.go | 7 ++- zpool.h | 146 +++++++++++++++++++++++++++---------------------------- 3 files changed, 78 insertions(+), 87 deletions(-) diff --git a/zfs.h b/zfs.h index 3a43c82..a022ab4 100644 --- a/zfs.h +++ b/zfs.h @@ -17,17 +17,6 @@ typedef struct zfs_share { uint64_t z_sharemax; /* max length of share string */ } zfs_share_t; -struct drr_begin { - uint64_t drr_magic; - uint64_t drr_versioninfo; /* was drr_version */ - uint64_t drr_creation_time; - dmu_objset_type_t drr_type; - uint32_t drr_flags; - uint64_t drr_toguid; - uint64_t drr_fromguid; - char drr_toname[MAXNAMELEN]; -} drr_begin; - /* * A limited number of zpl level stats are retrievable * with an ioctl. zfs diff is the current consumer. 
@@ -97,7 +86,6 @@ typedef struct zfs_cmd { uint64_t zc_iflags; /* internal to zfs(7fs) */ zfs_share_t zc_share; dmu_objset_stats_t zc_objset_stats; - struct drr_begin zc_begin_record; zinject_record_t zc_inject_record; uint32_t zc_defer_destroy; uint32_t zc_flags; diff --git a/zpool.go b/zpool.go index 85307dc..9739352 100644 --- a/zpool.go +++ b/zpool.go @@ -266,6 +266,9 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { } ep.State = PoolState(C.get_zpool_state(config)) + if ep.State != PoolStateExported { + continue + } if cname = C.get_zpool_name(config); cname == nil { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) @@ -351,8 +354,8 @@ func poolSearchImport(q string, searchpaths []string, guid bool) (name string, } name = C.GoString(cname) } - if retcode := C.zpool_import(C.libzfsHandle, config, cname, - nil); retcode != 0 { + if retcode := C.zpool_import_props(C.libzfsHandle, config, cname, + nil, C.ZFS_IMPORT_NORMAL); retcode != 0 { err = LastError() return } diff --git a/zpool.h b/zpool.h index b86b9bd..e29c24d 100644 --- a/zpool.h +++ b/zpool.h @@ -67,79 +67,79 @@ const char *get_zpool_comment(nvlist_ptr nv); nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv); -char *sZPOOL_CONFIG_VERSION; -char *sZPOOL_CONFIG_POOL_NAME; -char *sZPOOL_CONFIG_POOL_STATE; -char *sZPOOL_CONFIG_POOL_TXG; -char *sZPOOL_CONFIG_POOL_GUID; -char *sZPOOL_CONFIG_CREATE_TXG; -char *sZPOOL_CONFIG_TOP_GUID; -char *sZPOOL_CONFIG_VDEV_TREE; -char *sZPOOL_CONFIG_TYPE; -char *sZPOOL_CONFIG_CHILDREN; -char *sZPOOL_CONFIG_ID; -char *sZPOOL_CONFIG_GUID; -char *sZPOOL_CONFIG_PATH; -char *sZPOOL_CONFIG_DEVID; -char *sZPOOL_CONFIG_METASLAB_ARRAY; -char *sZPOOL_CONFIG_METASLAB_SHIFT; -char *sZPOOL_CONFIG_ASHIFT; -char *sZPOOL_CONFIG_ASIZE; -char *sZPOOL_CONFIG_DTL; -char *sZPOOL_CONFIG_SCAN_STATS; -char *sZPOOL_CONFIG_VDEV_STATS; -char *sZPOOL_CONFIG_WHOLE_DISK; -char *sZPOOL_CONFIG_ERRCOUNT; -char *sZPOOL_CONFIG_NOT_PRESENT; -char 
*sZPOOL_CONFIG_SPARES; -char *sZPOOL_CONFIG_IS_SPARE; -char *sZPOOL_CONFIG_NPARITY; -char *sZPOOL_CONFIG_HOSTID; -char *sZPOOL_CONFIG_HOSTNAME; -char *sZPOOL_CONFIG_LOADED_TIME; -char *sZPOOL_CONFIG_UNSPARE; -char *sZPOOL_CONFIG_PHYS_PATH; -char *sZPOOL_CONFIG_IS_LOG; -char *sZPOOL_CONFIG_L2CACHE; -char *sZPOOL_CONFIG_HOLE_ARRAY; -char *sZPOOL_CONFIG_VDEV_CHILDREN; -char *sZPOOL_CONFIG_IS_HOLE; -char *sZPOOL_CONFIG_DDT_HISTOGRAM; -char *sZPOOL_CONFIG_DDT_OBJ_STATS; -char *sZPOOL_CONFIG_DDT_STATS; -char *sZPOOL_CONFIG_SPLIT; -char *sZPOOL_CONFIG_ORIG_GUID; -char *sZPOOL_CONFIG_SPLIT_GUID; -char *sZPOOL_CONFIG_SPLIT_LIST; -char *sZPOOL_CONFIG_REMOVING; -char *sZPOOL_CONFIG_RESILVER_TXG; -char *sZPOOL_CONFIG_COMMENT; -char *sZPOOL_CONFIG_SUSPENDED; -char *sZPOOL_CONFIG_TIMESTAMP; -char *sZPOOL_CONFIG_BOOTFS; -char *sZPOOL_CONFIG_MISSING_DEVICES; -char *sZPOOL_CONFIG_LOAD_INFO; -char *sZPOOL_CONFIG_REWIND_INFO; -char *sZPOOL_CONFIG_UNSUP_FEAT; -char *sZPOOL_CONFIG_ENABLED_FEAT; -char *sZPOOL_CONFIG_CAN_RDONLY; -char *sZPOOL_CONFIG_FEATURES_FOR_READ; -char *sZPOOL_CONFIG_FEATURE_STATS; -char *sZPOOL_CONFIG_ERRATA; -char *sZPOOL_CONFIG_OFFLINE; -char *sZPOOL_CONFIG_FAULTED; -char *sZPOOL_CONFIG_DEGRADED; -char *sZPOOL_CONFIG_REMOVED; -char *sZPOOL_CONFIG_FRU; -char *sZPOOL_CONFIG_AUX_STATE; -char *sZPOOL_REWIND_POLICY; -char *sZPOOL_REWIND_REQUEST; -char *sZPOOL_REWIND_REQUEST_TXG; -char *sZPOOL_REWIND_META_THRESH; -char *sZPOOL_REWIND_DATA_THRESH; -char *sZPOOL_CONFIG_LOAD_TIME; -char *sZPOOL_CONFIG_LOAD_DATA_ERRORS; -char *sZPOOL_CONFIG_REWIND_TIME; +extern char *sZPOOL_CONFIG_VERSION; +extern char *sZPOOL_CONFIG_POOL_NAME; +extern char *sZPOOL_CONFIG_POOL_STATE; +extern char *sZPOOL_CONFIG_POOL_TXG; +extern char *sZPOOL_CONFIG_POOL_GUID; +extern char *sZPOOL_CONFIG_CREATE_TXG; +extern char *sZPOOL_CONFIG_TOP_GUID; +extern char *sZPOOL_CONFIG_VDEV_TREE; +extern char *sZPOOL_CONFIG_TYPE; +extern char *sZPOOL_CONFIG_CHILDREN; +extern char *sZPOOL_CONFIG_ID; +extern char 
*sZPOOL_CONFIG_GUID; +extern char *sZPOOL_CONFIG_PATH; +extern char *sZPOOL_CONFIG_DEVID; +extern char *sZPOOL_CONFIG_METASLAB_ARRAY; +extern char *sZPOOL_CONFIG_METASLAB_SHIFT; +extern char *sZPOOL_CONFIG_ASHIFT; +extern char *sZPOOL_CONFIG_ASIZE; +extern char *sZPOOL_CONFIG_DTL; +extern char *sZPOOL_CONFIG_SCAN_STATS; +extern char *sZPOOL_CONFIG_VDEV_STATS; +extern char *sZPOOL_CONFIG_WHOLE_DISK; +extern char *sZPOOL_CONFIG_ERRCOUNT; +extern char *sZPOOL_CONFIG_NOT_PRESENT; +extern char *sZPOOL_CONFIG_SPARES; +extern char *sZPOOL_CONFIG_IS_SPARE; +extern char *sZPOOL_CONFIG_NPARITY; +extern char *sZPOOL_CONFIG_HOSTID; +extern char *sZPOOL_CONFIG_HOSTNAME; +extern char *sZPOOL_CONFIG_LOADED_TIME; +extern char *sZPOOL_CONFIG_UNSPARE; +extern char *sZPOOL_CONFIG_PHYS_PATH; +extern char *sZPOOL_CONFIG_IS_LOG; +extern char *sZPOOL_CONFIG_L2CACHE; +extern char *sZPOOL_CONFIG_HOLE_ARRAY; +extern char *sZPOOL_CONFIG_VDEV_CHILDREN; +extern char *sZPOOL_CONFIG_IS_HOLE; +extern char *sZPOOL_CONFIG_DDT_HISTOGRAM; +extern char *sZPOOL_CONFIG_DDT_OBJ_STATS; +extern char *sZPOOL_CONFIG_DDT_STATS; +extern char *sZPOOL_CONFIG_SPLIT; +extern char *sZPOOL_CONFIG_ORIG_GUID; +extern char *sZPOOL_CONFIG_SPLIT_GUID; +extern char *sZPOOL_CONFIG_SPLIT_LIST; +extern char *sZPOOL_CONFIG_REMOVING; +extern char *sZPOOL_CONFIG_RESILVER_TXG; +extern char *sZPOOL_CONFIG_COMMENT; +extern char *sZPOOL_CONFIG_SUSPENDED; +extern char *sZPOOL_CONFIG_TIMESTAMP; +extern char *sZPOOL_CONFIG_BOOTFS; +extern char *sZPOOL_CONFIG_MISSING_DEVICES; +extern char *sZPOOL_CONFIG_LOAD_INFO; +extern char *sZPOOL_CONFIG_REWIND_INFO; +extern char *sZPOOL_CONFIG_UNSUP_FEAT; +extern char *sZPOOL_CONFIG_ENABLED_FEAT; +extern char *sZPOOL_CONFIG_CAN_RDONLY; +extern char *sZPOOL_CONFIG_FEATURES_FOR_READ; +extern char *sZPOOL_CONFIG_FEATURE_STATS; +extern char *sZPOOL_CONFIG_ERRATA; +extern char *sZPOOL_CONFIG_OFFLINE; +extern char *sZPOOL_CONFIG_FAULTED; +extern char *sZPOOL_CONFIG_DEGRADED; +extern char 
*sZPOOL_CONFIG_REMOVED; +extern char *sZPOOL_CONFIG_FRU; +extern char *sZPOOL_CONFIG_AUX_STATE; +extern char *sZPOOL_REWIND_POLICY; +extern char *sZPOOL_REWIND_REQUEST; +extern char *sZPOOL_REWIND_REQUEST_TXG; +extern char *sZPOOL_REWIND_META_THRESH; +extern char *sZPOOL_REWIND_DATA_THRESH; +extern char *sZPOOL_CONFIG_LOAD_TIME; +extern char *sZPOOL_CONFIG_LOAD_DATA_ERRORS; +extern char *sZPOOL_CONFIG_REWIND_TIME; #endif From 08a490350913f23895e719a6f17416fa3810566c Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Mon, 8 Jan 2018 15:29:42 +0100 Subject: [PATCH 31/36] - Ability to create pool with spares, and l2cache This breaks previous API PoolCreate function call, changes to fix this break will be necessary. --- zpool.c | 24 +++++ zpool.go | 268 ++++++++++++++++++++++++++++++++++---------------- zpool.h | 2 + zpool_test.go | 27 ++++- 4 files changed, 230 insertions(+), 91 deletions(-) diff --git a/zpool.c b/zpool.c index c63665a..6cd0d49 100644 --- a/zpool.c +++ b/zpool.c @@ -408,6 +408,30 @@ vdev_children_ptr get_vdev_children(nvlist_t *nv) { return children; } +vdev_children_ptr get_vdev_spares(nvlist_t *nv) { + int r; + vdev_children_ptr children = malloc(sizeof(vdev_children_t)); + memset(children, 0, sizeof(vdev_children_t)); + r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &(children->first), &(children->count)); + if (r != 0) { + free(children); + return NULL; + } + return children; +} + +vdev_children_ptr get_vdev_l2cache(nvlist_t *nv) { + int r; + vdev_children_ptr children = malloc(sizeof(vdev_children_t)); + memset(children, 0, sizeof(vdev_children_t)); + r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, &(children->first), &(children->count)); + if (r != 0) { + free(children); + return NULL; + } + return children; +} + const char *get_vdev_path(nvlist_ptr nv) { char *path = NULL; uint64_t notpresent = 0; diff --git a/zpool.go b/zpool.go index 9739352..9a6fba1 100644 --- a/zpool.go +++ b/zpool.go @@ -101,6 +101,9 @@ type 
PoolScanStat struct { type VDevTree struct { Type VDevType Devices []VDevTree // groups other devices (e.g. mirror) + Spares []VDevTree + L2Cache []VDevTree + Logs []VDevTree Parity uint Path string Name string @@ -233,6 +236,60 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { } vdevs.Devices = append(vdevs.Devices, vdev) } + if vdevs.Spares, err = poolGetSpares(name, nv); err != nil { + return + } + if vdevs.L2Cache, err = poolGetL2Cache(name, nv); err != nil { + return + } + return +} + +func poolGetSpares(name string, nv C.nvlist_ptr) (vdevs []VDevTree, err error) { + // Fetch the spares + var spares C.vdev_children_ptr + spares = C.get_vdev_spares(nv) + if spares != nil { + // this object that reference spares and count should be deallocated from memory + defer C.free(unsafe.Pointer(spares)) + vdevs = make([]VDevTree, 0, spares.count) + } + for c := C.uint_t(0); spares != nil && c < spares.count; c++ { + vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(spares.first, c), + C.B_TRUE) + var vdev VDevTree + vdev, err = poolGetConfig(C.GoString(vname), + C.nvlist_array_at(spares.first, c)) + C.free(unsafe.Pointer(vname)) + if err != nil { + return + } + vdevs = append(vdevs, vdev) + } + return +} + +func poolGetL2Cache(name string, nv C.nvlist_ptr) (vdevs []VDevTree, err error) { + // Fetch the spares + var l2cache C.vdev_children_ptr + l2cache = C.get_vdev_l2cache(nv) + if l2cache != nil { + // this object that reference l2cache and count should be deallocated from memory + defer C.free(unsafe.Pointer(l2cache)) + vdevs = make([]VDevTree, 0, l2cache.count) + } + for c := C.uint_t(0); l2cache != nil && c < l2cache.count; c++ { + vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(l2cache.first, c), + C.B_TRUE) + var vdev VDevTree + vdev, err = poolGetConfig(C.GoString(vname), + C.nvlist_array_at(l2cache.first, c)) + C.free(unsafe.Pointer(vname)) + if err != nil { + return + } + vdevs = append(vdevs, 
vdev) + } return } @@ -651,7 +708,52 @@ func toCDatasetProperties(props DatasetProperties) (cprops C.nvlist_ptr) { return } -func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, +func buildVdev(vdev VDevTree, ashift int) (nvvdev *C.struct_nvlist, err error) { + if r := C.nvlist_alloc(&nvvdev, C.NV_UNIQUE_NAME, 0); r != 0 { + err = errors.New("Failed to allocate vdev") + return + } + csType := C.CString(string(vdev.Type)) + r := C.nvlist_add_string(nvvdev, C.sZPOOL_CONFIG_TYPE, + csType) + C.free(unsafe.Pointer(csType)) + if r != 0 { + err = errors.New("Failed to set vdev type") + return + } + if r := C.nvlist_add_uint64(nvvdev, C.sZPOOL_CONFIG_IS_LOG, + vdev.isLog()); r != 0 { + err = errors.New("Failed to allocate vdev (is_log)") + return + } + if r := C.nvlist_add_uint64(nvvdev, + C.sZPOOL_CONFIG_WHOLE_DISK, 1); r != 0 { + err = errors.New("Failed to allocate vdev nvvdev (whdisk)") + return + } + if len(vdev.Path) > 0 { + csPath := C.CString(vdev.Path) + r := C.nvlist_add_string( + nvvdev, C.sZPOOL_CONFIG_PATH, + csPath) + C.free(unsafe.Pointer(csPath)) + if r != 0 { + err = errors.New("Failed to allocate vdev nvvdev (type)") + return + } + if ashift > 0 { + if r := C.nvlist_add_uint64(nvvdev, + C.sZPOOL_CONFIG_ASHIFT, + C.uint64_t(ashift)); r != 0 { + err = errors.New("Failed to allocate vdev nvvdev (ashift)") + return + } + } + } + return +} + +func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs, spares, l2cache []VDevTree, props PoolProperties) (err error) { count := len(vdevs) if count == 0 { @@ -663,28 +765,9 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, return } defer C.nvlist_free_array(childrens) - spares := C.nvlist_alloc_array(C.int(count)) - if childrens == nil { - err = errors.New("No enough memory") - return - } - nspares := 0 - defer C.nvlist_free_array(spares) - l2cache := C.nvlist_alloc_array(C.int(count)) - if childrens == nil { - err = errors.New("No enough memory") - return - } - nl2cache := 
0 - defer C.nvlist_free_array(l2cache) for i, vdev := range vdevs { grouping, mindevs, maxdevs := vdev.isGrouping() var child *C.struct_nvlist - // fmt.Println(vdev.Type) - if r := C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 { - err = errors.New("Failed to allocate vdev") - return - } vcount := len(vdev.Devices) if vcount < mindevs || vcount > maxdevs { err = fmt.Errorf( @@ -692,20 +775,19 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, vdev.Type, mindevs, maxdevs) return } - csType := C.CString(string(vdev.Type)) - r := C.nvlist_add_string(child, C.sZPOOL_CONFIG_TYPE, - csType) - C.free(unsafe.Pointer(csType)) - if r != 0 { - err = errors.New("Failed to set vdev type") - return - } - if r := C.nvlist_add_uint64(child, C.sZPOOL_CONFIG_IS_LOG, - vdev.isLog()); r != 0 { - err = errors.New("Failed to allocate vdev (is_log)") - return - } if grouping { + if r := C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 { + err = errors.New("Failed to allocate vdev") + return + } + csType := C.CString(string(vdev.Type)) + r := C.nvlist_add_string(child, C.sZPOOL_CONFIG_TYPE, + csType) + C.free(unsafe.Pointer(csType)) + if r != 0 { + err = errors.New("Failed to set vdev type") + return + } if vdev.Type == VDevTypeRaidz { r := C.nvlist_add_uint64(child, C.sZPOOL_CONFIG_NPARITY, @@ -715,49 +797,15 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, return } } - if err = buildVDevTree(child, vdev.Type, vdev.Devices, + if err = buildVDevTree(child, vdev.Type, vdev.Devices, nil, nil, props); err != nil { return } } else { - // if vdev.Type == VDevTypeDisk { - if r := C.nvlist_add_uint64(child, - C.sZPOOL_CONFIG_WHOLE_DISK, 1); r != 0 { - err = errors.New("Failed to allocate vdev child (whdisk)") + ashift, _ := strconv.Atoi(props[PoolPropAshift]) + if child, err = buildVdev(vdev, ashift); err != nil { return } - // } - if len(vdev.Path) > 0 { - csPath := C.CString(vdev.Path) - r := C.nvlist_add_string( - child, 
C.sZPOOL_CONFIG_PATH, - csPath) - C.free(unsafe.Pointer(csPath)) - if r != 0 { - err = errors.New("Failed to allocate vdev child (type)") - return - } - ashift, _ := strconv.Atoi(props[PoolPropAshift]) - if ashift > 0 { - if r := C.nvlist_add_uint64(child, - C.sZPOOL_CONFIG_ASHIFT, - C.uint64_t(ashift)); r != 0 { - err = errors.New("Failed to allocate vdev child (ashift)") - return - } - } - } - if vdev.Type == VDevTypeSpare { - C.nvlist_array_set(spares, C.int(nspares), child) - nspares++ - count-- - continue - } else if vdev.Type == VDevTypeL2cache { - C.nvlist_array_set(l2cache, C.int(nl2cache), child) - nl2cache++ - count-- - continue - } } C.nvlist_array_set(childrens, C.int(i), child) } @@ -768,31 +816,74 @@ func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, err = errors.New("Failed to allocate vdev children") return } - // fmt.Println("childs", root, count, rtype) - // debug.PrintStack() } - if nl2cache > 0 { - if r := C.nvlist_add_nvlist_array(root, - C.sZPOOL_CONFIG_L2CACHE, l2cache, - C.uint_t(nl2cache)); r != 0 { - err = errors.New("Failed to allocate vdev cache") + if len(spares) > 0 { + ashift, _ := strconv.Atoi(props[PoolPropAshift]) + if err = buildVdevSpares(root, VDevTypeRoot, spares, ashift); err != nil { return } } - if nspares > 0 { - if r := C.nvlist_add_nvlist_array(root, - C.sZPOOL_CONFIG_SPARES, spares, - C.uint_t(nspares)); r != 0 { - err = errors.New("Failed to allocate vdev spare") + if len(l2cache) > 0 { + ashift, _ := strconv.Atoi(props[PoolPropAshift]) + if err = buildVdevL2Cache(root, VDevTypeRoot, l2cache, ashift); err != nil { return } - // fmt.Println("spares", root, count) + } + return +} + +func buildVdevSpares(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, ashift int) (err error) { + count := len(vdevs) + if count == 0 { + return + } + spares := C.nvlist_alloc_array(C.int(count)) + if spares == nil { + err = errors.New("No enough memory buildVdevSpares") + return + } + defer C.nvlist_free_array(spares) + 
for i, vdev := range vdevs { + var child *C.struct_nvlist + if child, err = buildVdev(vdev, ashift); err != nil { + return + } + C.nvlist_array_set(spares, C.int(i), child) + } + if r := C.nvlist_add_nvlist_array(root, + C.sZPOOL_CONFIG_SPARES, spares, C.uint_t(len(vdevs))); r != 0 { + err = errors.New("Failed to allocate vdev spare") + } + return +} + +func buildVdevL2Cache(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, ashift int) (err error) { + count := len(vdevs) + if count == 0 { + return + } + l2cache := C.nvlist_alloc_array(C.int(count)) + if l2cache == nil { + err = errors.New("No enough memory buildVdevL2Cache") + return + } + defer C.nvlist_free_array(l2cache) + for i, vdev := range vdevs { + var child *C.struct_nvlist + if child, err = buildVdev(vdev, ashift); err != nil { + return + } + C.nvlist_array_set(l2cache, C.int(i), child) + } + if r := C.nvlist_add_nvlist_array(root, + C.sZPOOL_CONFIG_SPARES, l2cache, C.uint_t(len(vdevs))); r != 0 { + err = errors.New("Failed to allocate vdev l2cache") } return } // PoolCreate create ZFS pool per specs, features and properties of pool and root dataset -func PoolCreate(name string, vdevs []VDevTree, features map[string]string, +func PoolCreate(name string, vdev VDevTree, features map[string]string, props PoolProperties, fsprops DatasetProperties) (pool Pool, err error) { // create root vdev nvroot var nvroot *C.struct_nvlist @@ -811,7 +902,7 @@ func PoolCreate(name string, vdevs []VDevTree, features map[string]string, defer C.nvlist_free(nvroot) // Now we need to build specs (vdev hierarchy) - if err = buildVDevTree(nvroot, VDevTypeRoot, vdevs, props); err != nil { + if err = buildVDevTree(nvroot, VDevTypeRoot, vdev.Devices, vdev.Spares, vdev.L2Cache, props); err != nil { return } @@ -948,7 +1039,12 @@ func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { if poolName, err = pool.Name(); err != nil { return } - return poolGetConfig(poolName, nvroot) + if vdevs, err = poolGetConfig(poolName, nvroot); 
err != nil { + return + } + vdevs.Spares, err = poolGetSpares(poolName, nvroot) + vdevs.L2Cache, err = poolGetL2Cache(poolName, nvroot) + return } func (s PoolState) String() string { diff --git a/zpool.h b/zpool.h index e29c24d..0961a36 100644 --- a/zpool.h +++ b/zpool.h @@ -56,6 +56,8 @@ const char *get_vdev_type(nvlist_ptr nv); const vdev_stat_ptr get_vdev_stats(nvlist_ptr nv); pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv); vdev_children_ptr get_vdev_children(nvlist_t *nv); +vdev_children_ptr get_vdev_spares(nvlist_t *nv); +vdev_children_ptr get_vdev_l2cache(nvlist_t *nv); const char *get_vdev_path(nvlist_ptr nv); uint64_t get_vdev_is_log(nvlist_ptr nv); diff --git a/zpool_test.go b/zpool_test.go index e257d30..8740d7b 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -84,6 +84,7 @@ func zpoolTestPoolCreate(t *testing.T) { disks := [2]string{s1path, s2path} + var vdev zfs.VDevTree var vdevs, mdevs, sdevs []zfs.VDevTree for _, d := range disks { mdevs = append(mdevs, @@ -93,8 +94,9 @@ func zpoolTestPoolCreate(t *testing.T) { {Type: zfs.VDevTypeFile, Path: s3path}} vdevs = []zfs.VDevTree{ zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs}, - zfs.VDevTree{Type: zfs.VDevTypeSpare, Devices: sdevs}, } + vdev.Devices = vdevs + vdev.Spares = sdevs props := make(map[zfs.Prop]string) fsprops := make(map[zfs.Prop]string) @@ -104,7 +106,7 @@ func zpoolTestPoolCreate(t *testing.T) { features["empty_bpobj"] = zfs.FENABLED features["lz4_compress"] = zfs.FENABLED - pool, err := zfs.PoolCreate(TSTPoolName, vdevs, features, props, fsprops) + pool, err := zfs.PoolCreate(TSTPoolName, vdev, features, props, fsprops) if err != nil { t.Error(err) // try cleanup @@ -231,6 +233,19 @@ func printVDevTree(vt zfs.VDevTree, pref string) { for _, v := range vt.Devices { printVDevTree(v, " "+pref) } + if len(vt.Spares) > 0 { + fmt.Println("spares:") + for _, v := range vt.Spares { + printVDevTree(v, " "+pref) + } + } + + if len(vt.L2Cache) > 0 { + fmt.Println("l2cache:") + for _, v 
:= range vt.L2Cache { + printVDevTree(v, " "+pref) + } + } } func zpoolTestPoolImportSearch(t *testing.T) { @@ -249,7 +264,6 @@ func zpoolTestPoolImportSearch(t *testing.T) { fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH") println("---------------------------------------------------------------") printVDevTree(p.VDevs, "") - } print("PASS\n\n") } @@ -408,6 +422,7 @@ func ExamplePoolOpenAll() { func ExamplePoolCreate() { disks := [2]string{"/dev/disk/by-id/ATA-123", "/dev/disk/by-id/ATA-456"} + var vdev zfs.VDevTree var vdevs, mdevs, sdevs []zfs.VDevTree // build mirror devices specs @@ -423,9 +438,11 @@ func ExamplePoolCreate() { // pool specs vdevs = []zfs.VDevTree{ zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs}, - zfs.VDevTree{Type: zfs.VDevTypeSpare, Devices: sdevs}, } + vdev.Devices = vdevs + vdev.Spares = sdevs + // pool properties props := make(map[zfs.Prop]string) // root dataset filesystem properties @@ -443,7 +460,7 @@ func ExamplePoolCreate() { // Based on specs formed above create test pool as 2 disk mirror and // one spare disk - pool, err := zfs.PoolCreate("TESTPOOL", vdevs, features, props, fsprops) + pool, err := zfs.PoolCreate("TESTPOOL", vdev, features, props, fsprops) if err != nil { println("Error: ", err.Error()) return From f5a73ad14f12b221060b69662dc25eede342dcca Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Wed, 28 Mar 2018 10:24:47 +0200 Subject: [PATCH 32/36] Return logs (ZIL) vdev as well in VDevTree --- .gitignore | 1 + zpool.go | 18 +++++++----------- zpool_test.go | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index cf468c9..1fcc717 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .gitconfig *.sublime-* +go-libzfs.test diff --git a/zpool.go b/zpool.go index 9a6fba1..7a3f547 100644 --- a/zpool.go +++ b/zpool.go @@ -103,7 +103,7 @@ type VDevTree struct { Devices []VDevTree // groups other devices (e.g. 
mirror) Spares []VDevTree L2Cache []VDevTree - Logs []VDevTree + Logs *VDevTree Parity uint Path string Name string @@ -222,9 +222,7 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { var islog = C.uint64_t(C.B_FALSE) islog = C.get_vdev_is_log(C.nvlist_array_at(children.first, c)) - if islog != C.B_FALSE { - continue - } + vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(children.first, c), C.B_TRUE) var vdev VDevTree @@ -234,13 +232,11 @@ func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { if err != nil { return } - vdevs.Devices = append(vdevs.Devices, vdev) - } - if vdevs.Spares, err = poolGetSpares(name, nv); err != nil { - return - } - if vdevs.L2Cache, err = poolGetL2Cache(name, nv); err != nil { - return + if islog != C.B_FALSE { + vdevs.Logs = &vdev + } else { + vdevs.Devices = append(vdevs.Devices, vdev) + } } return } diff --git a/zpool_test.go b/zpool_test.go index 8740d7b..3ea75eb 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -1,6 +1,7 @@ package zfs_test import ( + "encoding/json" "fmt" "io/ioutil" "os" @@ -529,3 +530,34 @@ func ExamplePool_State() { } println("POOL TESTPOOL state:", zfs.PoolStateToName(pstate)) } + +func TestPool_VDevTree(t *testing.T) { + type fields struct { + poolName string + } + tests := []struct { + name string + fields fields + wantErr bool + }{ + // TODO: Add test cases. 
+ { + name: "test1", + fields: fields{"NETSTOR"}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pool, _ := zfs.PoolOpen(tt.fields.poolName) + defer pool.Close() + gotVdevs, err := pool.VDevTree() + if (err != nil) != tt.wantErr { + t.Errorf("Pool.VDevTree() error = %v, wantErr %v", err, tt.wantErr) + return + } + jsonData, _ := json.MarshalIndent(gotVdevs, "", "\t") + t.Logf("gotVdevs: %s", string(jsonData)) + }) + } +} From b1b9ae5efcc9da70c03ccc7354a416a6d7cd74f9 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Tue, 10 Apr 2018 13:45:02 +0200 Subject: [PATCH 33/36] - Fix PoolImportSearch not listing pools that are not explicitly exported --- zpool.c | 10 ++++++++++ zpool.go | 5 +---- zpool.h | 2 ++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/zpool.c b/zpool.c index 6cd0d49..a6c982b 100644 --- a/zpool.c +++ b/zpool.c @@ -486,4 +486,14 @@ nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv) { return NULL; } return vdev_tree; +} + + +nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan) { + importargs_t idata = { 0 }; + idata.path = path; + idata.paths = paths; + // idata.scan = 0; + + return zpool_search_import(zfsh, &idata); } \ No newline at end of file diff --git a/zpool.go b/zpool.go index 7a3f547..0cc47a0 100644 --- a/zpool.go +++ b/zpool.go @@ -307,7 +307,7 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { C.strings_setat(cpaths, C.int(i), csPath) } - pools := C.zpool_find_import(C.libzfsHandle, C.int(numofp), cpaths) + pools := C.go_zpool_search_import(C.libzfsHandle, C.int(numofp), cpaths, C.B_FALSE) defer C.nvlist_free(pools) elem = C.nvlist_next_nvpair(pools, elem) epools = make([]ExportedPool, 0, 1) @@ -319,9 +319,6 @@ func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { } ep.State = PoolState(C.get_zpool_state(config)) - if ep.State != PoolStateExported { - continue - } if cname = 
C.get_zpool_name(config); cname == nil { err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) diff --git a/zpool.h b/zpool.h index 0961a36..cdb5892 100644 --- a/zpool.h +++ b/zpool.h @@ -68,6 +68,8 @@ const char *get_zpool_comment(nvlist_ptr nv); nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv); +nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan); + extern char *sZPOOL_CONFIG_VERSION; extern char *sZPOOL_CONFIG_POOL_NAME; From 44a53fa2e407a2bcc8b79c5c97f44f6f9928aa4e Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Thu, 12 Apr 2018 11:27:31 +0200 Subject: [PATCH 34/36] Fix bug: Overlay property not updated --- a_test.go | 1 + common.go | 4 ++++ zfs_test.go | 26 ++++++++++++++++++++++++++ zpool_test.go | 4 ++-- 4 files changed, 33 insertions(+), 2 deletions(-) diff --git a/a_test.go b/a_test.go index 28f9aad..da96b49 100644 --- a/a_test.go +++ b/a_test.go @@ -24,6 +24,7 @@ func Test(t *testing.T) { zfsTestDatasetOpen(t) zfsTestDatasetSnapshot(t) zfsTestDatasetOpenAll(t) + zfsTestDatasetSetProperty(t) zfsTestDatasetDestroy(t) diff --git a/common.go b/common.go index b7949d0..81a4e6d 100644 --- a/common.go +++ b/common.go @@ -246,6 +246,10 @@ const ( DatasetPropLogicalused DatasetPropLogicalreferenced DatasetPropInconsistent /* not exposed to the user */ + DatasetPropFilesystemLimit + DatasetPropSnapshotLimit + DatasetPropFilesystemCount + DatasetPropSnapshotCount DatasetPropSnapdev DatasetPropAcltype DatasetPropSelinuxContext diff --git a/zfs_test.go b/zfs_test.go index 7a64992..22628ac 100644 --- a/zfs_test.go +++ b/zfs_test.go @@ -93,6 +93,32 @@ func zfsTestDatasetOpen(t *testing.T) { print("PASS\n\n") } +func zfsTestDatasetSetProperty(t *testing.T) { + println("TEST Dataset SetProp(", TSTDatasetPath, ") ... 
") + d, err := zfs.DatasetOpen(TSTDatasetPath) + if err != nil { + t.Error(err) + return + } + defer d.Close() + if err = d.SetProperty(zfs.DatasetPropOverlay, "on"); err != nil { + t.Error(err) + return + } + if prop, err := d.GetProperty(zfs.DatasetPropOverlay); err != nil { + t.Error(err) + return + } else { + println(prop.Value) + if prop.Value != "on" { + t.Error(fmt.Errorf("Update of dataset property failed")) + return + } + } + print("PASS\n\n") + return +} + func zfsTestDatasetOpenAll(t *testing.T) { println("TEST DatasetOpenAll()/DatasetCloseAll() ... ") ds, err := zfs.DatasetOpenAll() diff --git a/zpool_test.go b/zpool_test.go index 3ea75eb..72e9aff 100644 --- a/zpool_test.go +++ b/zpool_test.go @@ -274,9 +274,9 @@ func zpoolTestPoolProp(t *testing.T) { if pool, err := zfs.PoolOpen(TSTPoolName); err == nil { defer pool.Close() // Turn on snapshot listing for pool - pool.SetProperty(zfs.PoolPropListsnaps, "on") + pool.SetProperty(zfs.PoolPropListsnaps, "off") // Verify change is succesfull - if pool.Properties[zfs.PoolPropListsnaps].Value != "on" { + if pool.Properties[zfs.PoolPropListsnaps].Value != "off" { t.Error(fmt.Errorf("Update of pool property failed")) return } From 441e099de9619d1e99746b4c99c5ed67b3c0fcad Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Tue, 26 Jun 2018 14:01:39 +0200 Subject: [PATCH 35/36] zpool online, offline and clear devices --- zpool.c | 16 ++++++ zpool.h | 13 +++++ zpool_vdev.c | 37 +++++++++++++ zpool_vdev.go | 143 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 209 insertions(+) create mode 100644 zpool_vdev.c create mode 100644 zpool_vdev.go diff --git a/zpool.c b/zpool.c index a6c982b..d592be4 100644 --- a/zpool.c +++ b/zpool.c @@ -496,4 +496,20 @@ nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path // idata.scan = 0; return zpool_search_import(zfsh, &idata); +} + + +int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy) { + nvlist_t 
*policy = NULL; + int ret = 0; + if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || + nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0) + return (1); + + if (zpool_clear(pool->zph, device, policy) != 0) + ret = 1; + + nvlist_free(policy); + + return (ret); } \ No newline at end of file diff --git a/zpool.h b/zpool.h index cdb5892..33d416f 100644 --- a/zpool.h +++ b/zpool.h @@ -5,6 +5,15 @@ #ifndef SERVERWARE_ZPOOL_H #define SERVERWARE_ZPOOL_H +/* Rewind request information */ +#define ZPOOL_NO_REWIND 1 /* No policy - default behavior */ +#define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */ +#define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */ +#define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */ +#define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */ +#define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */ +#define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */ + struct zpool_list { zpool_handle_t *zph; void *pnext; @@ -70,6 +79,10 @@ nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv); nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan); +__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags); +int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force); +int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy); + extern char *sZPOOL_CONFIG_VERSION; extern char *sZPOOL_CONFIG_POOL_NAME; diff --git a/zpool_vdev.c b/zpool_vdev.c new file mode 100644 index 0000000..67bdbde --- /dev/null +++ b/zpool_vdev.c @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include + +#include "common.h" +#include "zpool.h" + + +__uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags) { + vdev_state_t newstate = VDEV_STATE_UNKNOWN; + zpool_vdev_online(pool->zph, path, flags, &newstate); + return 
newstate; +} + +int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force) { + int ret = 0; + // if (force) { + // uint64_t guid = zpool_vdev_path_to_guid(pool->zph, path); + // vdev_aux_t aux; + // if (istmp == B_FALSE) { + // /* Force the fault to persist across imports */ + // aux = VDEV_AUX_EXTERNAL_PERSIST; + // } else { + // aux = VDEV_AUX_EXTERNAL; + // } + + // if (guid == 0 || zpool_vdev_fault(pool->zph, guid, aux) != 0) + // ret = 1; + // } else { + if (zpool_vdev_offline(pool->zph, path, istmp) != 0) + ret = 1; + // } + return ret; +} + diff --git a/zpool_vdev.go b/zpool_vdev.go new file mode 100644 index 0000000..379ea9e --- /dev/null +++ b/zpool_vdev.go @@ -0,0 +1,143 @@ +package zfs + +// #include +// #include +// #include "common.h" +// #include "zpool.h" +// #include "zfs.h" +import "C" +import ( + "fmt" + "unsafe" +) + +// Online try to set dev online +// expand - expand storage +func (pool *Pool) Online(expand bool, devs ...string) (err error) { + cflags := C.int(0) + if expand { + cflags = C.ZFS_ONLINE_EXPAND + } + for _, dev := range devs { + csdev := C.CString(dev) + var newstate VDevState + if newstate = VDevState(C.set_zpool_vdev_online(pool.list, csdev, cflags)); newstate != VDevStateUnknown { + if newstate != VDevStateHealthy { + err = fmt.Errorf( + "Device '%s' onlined, but remains in faulted state", + dev) + } + } else { + err = LastError() + } + C.free(unsafe.Pointer(csdev)) + } + return +} + +// Offline Take the device/s in offline state +func (pool *Pool) Offline(force bool, devs ...string) (err error) { + return pool.offline(false, force, devs...) +} + +// OfflineTemp Take the device/s in offline state temporary, +// upon reboot, the specified physical device reverts to its previous state. +// force - Force the device into a faulted state. +func (pool *Pool) OfflineTemp(force bool, devs ...string) (err error) { + return pool.offline(true, force, devs...) 
+} + +// temp - Upon reboot, the specified physical device reverts to its previous state. +// force - Force the device into a faulted state. +func (pool *Pool) offline(temp, force bool, devs ...string) (err error) { + for _, dev := range devs { + csdev := C.CString(dev) + var newstate VDevState + if newstate = VDevState(C.set_zpool_vdev_offline(pool.list, csdev, booleanT(temp), booleanT(force))); newstate != VDevStateUnknown { + if newstate != VDevStateHealthy { + err = fmt.Errorf( + "Device '%s' offlined, but remains in faulted state", + dev) + } + } else { + err = LastError() + } + C.free(unsafe.Pointer(csdev)) + } + return +} + +// Clear - Clear all errors associated with a pool or a particular device. +func (pool *Pool) Clear(device string) (err error) { + csdev := C.CString(device) + if len(device) == 0 { + csdev = nil + } + if sc := C.do_zpool_clear(pool.list, csdev, C.ZPOOL_NO_REWIND); sc != 0 { + err = fmt.Errorf("Pool clear failed") + } + return +} + +// Attach test +// func (pool *Pool) attach(props PoolProperties, devs ...string) (err error) { +// cprops := toCPoolProperties(props) +// if cprops != nil { +// defer C.nvlist_free(cprops) +// } else { +// return fmt.Errorf("Out of memory [Pool Attach properties]") +// } +// cdevs := C.alloc_cstrings(C.int(len(devs))) +// if cdevs != nil { +// defer C.free(unsafe.Pointer(cdevs)) +// } else { +// return fmt.Errorf("Out of memory [Pool Attach args]") +// } +// for i, dp := range devs { +// tmp := C.CString(dp) +// if tmp != nil { +// defer C.free(unsafe.Pointer(tmp)) +// } else { +// return fmt.Errorf("Out of memory [Pool Attach dev]") +// } +// C.strings_setat(cdevs, C.int(i), tmp) +// } +// // vroot := C.make_root_vdev(pool.list.zph, cprops, 0, 0, 0, 0, len(devs), cdevs) +// var nvroot *C.struct_nvlist +// if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 { +// err = errors.New("Failed to allocate root vdev") +// return +// } +// csTypeRoot := C.CString(string(VDevTypeRoot)) +// r := 
C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE, +// csTypeRoot) +// C.free(unsafe.Pointer(csTypeRoot)) +// if r != 0 { +// err = errors.New("Failed to allocate root vdev") +// return +// } +// defer C.nvlist_free(nvroot) + +// // Now we need to build specs (vdev hierarchy) +// if err = buildVDevTree(nvroot, VDevTypeRoot, vdev.Devices, vdev.Spares, vdev.L2Cache, props); err != nil { +// return +// } + +// return +// } + +// func (pool *Pool) AttachForce(devs ...string) (err error) { +// return +// } + +// func (pool *Pool) Detach(devs ...string) (err error) { +// return +// } + +// func (pool *Pool) DetachForce(devs ...string) (err error) { +// return +// } + +// func (pool *Pool) Replace(devs ...string) (err error) { +// return +// } From 7822f47737852721ad96dd4fc55300e1ae641d88 Mon Sep 17 00:00:00 2001 From: Faruk Kasumovic Date: Tue, 26 Jun 2018 14:53:01 +0200 Subject: [PATCH 36/36] PoolStatus to string conversion --- zpool.go | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/zpool.go b/zpool.go index 0cc47a0..6d00056 100644 --- a/zpool.go +++ b/zpool.go @@ -1085,3 +1085,83 @@ func (s VDevState) String() string { return "UNKNOWN" } } + +func (s PoolStatus) String() string { + switch s { + case PoolStatusCorruptCache: + return "CORRUPT_CACHE" + case PoolStatusMissingDevR: + return "MISSING_DEV_R" /* missing device with replicas */ + case PoolStatusMissingDevNr: /* missing device with no replicas */ + return "MISSING_DEV_NR" + case PoolStatusCorruptLabelR: /* bad device label with replicas */ + return "CORRUPT_LABEL_R" + case PoolStatusCorruptLabelNr: /* bad device label with no replicas */ + return "CORRUPT_LABEL_NR" + case PoolStatusBadGUIDSum: /* sum of device guids didn't match */ + return "BAD_GUID_SUM" + case PoolStatusCorruptPool: /* pool metadata is corrupted */ + return "CORRUPT_POOL" + case PoolStatusCorruptData: /* data errors in user (meta)data */ + return "CORRUPT_DATA" + case 
PoolStatusFailingDev: /* device experiencing errors */ + return "FAILING_DEV" + case PoolStatusVersionNewer: /* newer on-disk version */ + return "VERSION_NEWER" + case PoolStatusHostidMismatch: /* last accessed by another system */ + return "HOSTID_MISMATCH" + case PoolStatusIoFailureWait: /* failed I/O, failmode 'wait' */ + return "FAILURE_WAIT" + case PoolStatusIoFailureContinue: /* failed I/O, failmode 'continue' */ + return "FAILURE_CONTINUE" + case PoolStatusBadLog: /* cannot read log chain(s) */ + return "BAD_LOG" + case PoolStatusErrata: /* informational errata available */ + return "ERRATA" + + /* + * If the pool has unsupported features but can still be opened in + * read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the + * pool has unsupported features but cannot be opened at all, its + * status is ZPOOL_STATUS_UNSUP_FEAT_READ. + */ + case PoolStatusUnsupFeatRead: /* unsupported features for read */ + return "UNSUP_FEAT_READ" + case PoolStatusUnsupFeatWrite: /* unsupported features for write */ + return "UNSUP_FEAT_WRITE" + + /* + * These faults have no corresponding message ID. At the time we are + * checking the status, the original reason for the FMA fault (I/O or + * checksum errors) has been lost. + */ + case PoolStatusFaultedDevR: /* faulted device with replicas */ + return "FAULTED_DEV_R" + case PoolStatusFaultedDevNr: /* faulted device with no replicas */ + return "FAULTED_DEV_NR" + + /* + * The following are not faults per se, but still an error possibly + * requiring administrative attention. There is no corresponding + * message ID. 
+ */ + case PoolStatusVersionOlder: /* older legacy on-disk version */ + return "VERSION_OLDER" + case PoolStatusFeatDisabled: /* supported features are disabled */ + return "FEAT_DISABLED" + case PoolStatusResilvering: /* device being resilvered */ + return "RESILVERING" + case PoolStatusOfflineDev: /* device offline */ + return "OFFLINE_DEV" + case PoolStatusRemovedDev: /* removed device */ + return "REMOVED_DEV" + + /* + * Finally, the following indicates a healthy pool. + */ + case PoolStatusOk: + return "OK" + default: + return "OK" + } +}