From c896a94589ab2f4122afa4a849561eb1ef392307 Mon Sep 17 00:00:00 2001
From: Aravinda VK
Date: Mon, 7 Jan 2019 10:58:23 +0530
Subject: [PATCH] Skip lvs command during thinpool delete

If a volume is cloned from another volume, the bricks of the cloned
volume will also belong to the same LV thinpool as the original
volume's bricks. So, before removing the thinpool, a check was made to
confirm that the number of LVs in that thinpool is zero. This check was
causing a hang when parallel volume delete commands were issued.

With this PR, the number-of-LVs check is removed; instead, the failure
of the thinpool delete is captured and handled gracefully. This PR also
adds support for gracefully deleting the volume if the LV or thinpool
was already deleted by a previous failed transaction or by a manual
delete.

Signed-off-by: Aravinda VK
---
 e2e/smartvol_ops_test.go                     | 65 +++++++++++++++++++
 glusterd2/brick/types.go                     | 11 +++-
 .../commands/snapshot/snapshot-create.go     |  3 +
 .../commands/snapshot/snapshot-restore.go    |  1 +
 .../commands/volumes/volume-smartvol-txn.go  |  4 +-
 glusterd2/volume/struct.go                   |  9 ++-
 glusterd2/volume/volume-utils.go             | 57 +++++++++-------
 pkg/lvmutils/utils.go                        | 34 +++++++++-
 plugins/device/deviceutils/store-utils.go    |  2 +-
 9 files changed, 153 insertions(+), 33 deletions(-)

diff --git a/e2e/smartvol_ops_test.go b/e2e/smartvol_ops_test.go
index 53da705dc..e3ccb3840 100644
--- a/e2e/smartvol_ops_test.go
+++ b/e2e/smartvol_ops_test.go
@@ -510,6 +510,69 @@ func testSmartVolumeAutoDistributeDisperse(t *testing.T) {
     checkZeroLvs(r)
 }

+func testSmartVolumeWhenCloneExists(t *testing.T) {
+    r := require.New(t)
+
+    smartvolname := "svol1"
+
+    createReq := api.VolCreateReq{
+        Name:         smartvolname,
+        Size:         20 * gutils.MiB,
+        ReplicaCount: 3,
+    }
+    volinfo, err := client.VolumeCreate(createReq)
+    r.Nil(err)
+
+    r.Len(volinfo.Subvols, 1)
+    r.Equal("Replicate", volinfo.Type.String())
+    r.Len(volinfo.Subvols[0].Bricks, 3)
+
+    err = client.VolumeStart(smartvolname, false)
+    r.Nil(err)
+
+    // Snapshot Create, Activate, Clone and delete Snapshot
+    snapshotCreateReq := api.SnapCreateReq{
+        VolName:  smartvolname,
+        SnapName: smartvolname + "-s1",
+    }
+    _, err = client.SnapshotCreate(snapshotCreateReq)
+    r.Nil(err, "snapshot create failed")
+
+    var snapshotActivateReq api.SnapActivateReq
+
+    err = client.SnapshotActivate(snapshotActivateReq, smartvolname+"-s1")
+    r.Nil(err)
+
+    snapshotCloneReq := api.SnapCloneReq{
+        CloneName: smartvolname + "-c1",
+    }
+    _, err = client.SnapshotClone(smartvolname+"-s1", snapshotCloneReq)
+    r.Nil(err, "snapshot clone failed")
+
+    err = client.SnapshotDelete(smartvolname + "-s1")
+    r.Nil(err)
+
+    // Check number of Lvs
+    nlv, err := numberOfLvs("gluster-dev-gluster_loop1")
+    r.Nil(err)
+    // Thinpool + brick + Clone volume's brick
+    r.Equal(3, nlv)
+
+    r.Nil(client.VolumeStop(smartvolname))
+
+    r.Nil(client.VolumeDelete(smartvolname))
+
+    nlv, err = numberOfLvs("gluster-dev-gluster_loop1")
+    r.Nil(err)
+    // Thinpool + Clone volume's brick
+    r.Equal(2, nlv)
+
+    // Delete Clone Volume
+    r.Nil(client.VolumeDelete(smartvolname + "-c1"))
+
+    checkZeroLvs(r)
+}
+
 func editDevice(t *testing.T) {
     r := require.New(t)
     peerList, err := client.Peers()
@@ -632,6 +695,8 @@ func TestSmartVolume(t *testing.T) {
     t.Run("Smartvol Distributed-Disperse Volume", testSmartVolumeDistributeDisperse)
     t.Run("Smartvol Auto Distributed-Replicate Volume", testSmartVolumeAutoDistributeReplicate)
     t.Run("Smartvol Auto Distributed-Disperse Volume", testSmartVolumeAutoDistributeDisperse)
+    // Test dependent lvs in thinpool cases
+    t.Run("Smartvol delete when clone exists", testSmartVolumeWhenCloneExists)

     t.Run("Replace Brick", testReplaceBrick)
     t.Run("Edit device", editDevice)
diff --git a/glusterd2/brick/types.go b/glusterd2/brick/types.go
index c80a7c21e..fc2b872ce 100644
--- a/glusterd2/brick/types.go
+++ b/glusterd2/brick/types.go
@@ -32,6 +32,14 @@ type MountInfo struct {
     MntOpts string
 }

+// DeviceInfo is used to store brick device information
+type DeviceInfo struct {
+    TpName     string
+    LvName     string
+    VgName     string
+    RootDevice string
+}
+
 // Brickinfo is the static information about the brick
 type Brickinfo struct {
     ID             uuid.UUID
@@ -44,9 +52,8 @@ type Brickinfo struct {
     Type           Type
     Decommissioned bool
     PType          ProvisionType
-    VgName         string
-    RootDevice     string
     MountInfo
+    DeviceInfo
 }

 // SizeInfo represents sizing information.
diff --git a/glusterd2/commands/snapshot/snapshot-create.go b/glusterd2/commands/snapshot/snapshot-create.go
index 89e2760a8..69b650043 100644
--- a/glusterd2/commands/snapshot/snapshot-create.go
+++ b/glusterd2/commands/snapshot/snapshot-create.go
@@ -500,6 +500,9 @@ func createSnapSubvols(newVolinfo, origVolinfo *volume.Volinfo, nodeData map[str
             DevicePath: mountData.DevicePath,
             FsType:     mountData.FsType,
             MntOpts:    mountData.MntOpts,
+            VgName:     brickinfo.DeviceInfo.VgName,
+            RootDevice: brickinfo.DeviceInfo.RootDevice,
+            TpName:     brickinfo.DeviceInfo.TpName,
         }

         bricks = append(bricks, brick)
diff --git a/glusterd2/commands/snapshot/snapshot-restore.go b/glusterd2/commands/snapshot/snapshot-restore.go
index 7ba4a3f6b..fcfe8eb08 100644
--- a/glusterd2/commands/snapshot/snapshot-restore.go
+++ b/glusterd2/commands/snapshot/snapshot-restore.go
@@ -181,6 +181,7 @@ func createVolumeBrickFromSnap(bricks []brick.Brickinfo, vol *volume.Volinfo) []
             Hostname:   b.Hostname,
             ID:         b.ID,
             MountInfo:  b.MountInfo,
+            DeviceInfo: b.DeviceInfo,
             Path:       b.Path,
             PeerID:     b.PeerID,
             Type:       b.Type,
diff --git a/glusterd2/commands/volumes/volume-smartvol-txn.go b/glusterd2/commands/volumes/volume-smartvol-txn.go
index cd42607eb..9744c4244 100644
--- a/glusterd2/commands/volumes/volume-smartvol-txn.go
+++ b/glusterd2/commands/volumes/volume-smartvol-txn.go
@@ -136,7 +136,7 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
         }

         // Remove LV
-        err = lvmutils.RemoveLV(b.VgName, b.LvName)
+        err = lvmutils.RemoveLV(b.VgName, b.LvName, true)
         if err != nil {
             c.Logger().WithError(err).WithFields(log.Fields{
                 "vg-name": b.VgName,
@@ -145,7 +145,7 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
         }

         // Remove Thin Pool
-        err = lvmutils.RemoveLV(b.VgName, b.TpName)
+        err = lvmutils.RemoveLV(b.VgName, b.TpName, true)
         if err != nil {
             c.Logger().WithError(err).WithFields(log.Fields{
                 "vg-name": b.VgName,
diff --git a/glusterd2/volume/struct.go b/glusterd2/volume/struct.go
index 856c479f9..69af5ba40 100644
--- a/glusterd2/volume/struct.go
+++ b/glusterd2/volume/struct.go
@@ -175,8 +175,13 @@ func NewBrickEntries(bricks []api.BrickReq, volName, volfileID string, volID uui
         binfo.VolfileID = volfileID
         binfo.VolumeID = volID
         binfo.ID = uuid.NewRandom()
-        binfo.VgName = b.VgName
-        binfo.RootDevice = b.RootDevice
+
+        binfo.DeviceInfo = brick.DeviceInfo{
+            LvName:     b.LvName,
+            TpName:     b.TpName,
+            VgName:     b.VgName,
+            RootDevice: b.RootDevice,
+        }
         binfo.PType = ptype

         if ptype.IsAutoProvisioned() || ptype.IsSnapshotProvisioned() {
diff --git a/glusterd2/volume/volume-utils.go b/glusterd2/volume/volume-utils.go
index 9897d294e..9d9728183 100644
--- a/glusterd2/volume/volume-utils.go
+++ b/glusterd2/volume/volume-utils.go
@@ -286,18 +286,11 @@ func CleanBricks(volinfo *Volinfo) error {
         }
         vgname := parts[2]
         lvname := parts[3]
-        tpname, err := lvmutils.GetThinpoolName(vgname, lvname)
-        if err != nil {
-            log.WithError(err).WithFields(log.Fields{
-                "vg-name": vgname,
-                "lv-name": lvname,
-            }).Error("failed to get thinpool name")
-            return err
-        }

         // Remove LV
-        err = lvmutils.RemoveLV(vgname, lvname)
-        if err != nil {
+        err = lvmutils.RemoveLV(vgname, lvname, true)
+        // Ignore error if LV not exists
+        if err != nil && !lvmutils.IsLvNotFoundError(err) {
             log.WithError(err).WithFields(log.Fields{
                 "vg-name": vgname,
                 "lv-name": lvname,
@@ -309,26 +302,42 @@ func CleanBricks(volinfo *Volinfo) error {
             continue
         }

-        // Remove Thin Pool if LV count is zero, Thinpool will
-        // have more LVs in case of snapshots and clones
-        numLvs, err := lvmutils.NumberOfLvs(vgname, tpname)
-        if err != nil {
-            log.WithError(err).WithFields(log.Fields{
-                "vg-name": vgname,
-                "tp-name": tpname,
-            }).Error("failed to get number of lvs")
-            return err
-        }
-
-        if numLvs == 0 {
-            err = lvmutils.RemoveLV(vgname, tpname)
+        // Thinpool info will not be available if Volume is manually provisioned
+        // or a volume is cloned from a manually provisioned volume
+        if b.DeviceInfo.TpName != "" {
+            err = lvmutils.DeactivateLV(vgname, b.DeviceInfo.TpName)
             if err != nil {
                 log.WithError(err).WithFields(log.Fields{
                     "vg-name": vgname,
-                    "tp-name": tpname,
+                    "tp-name": b.DeviceInfo.TpName,
+                }).Error("thinpool deactivate failed")
+                return err
+            }
+
+            err = lvmutils.RemoveLV(vgname, b.DeviceInfo.TpName, false)
+            // Do not remove Thinpool if the dependent Lvs exists
+            // Ignore the lvremove command failure if the reason is
+            // dependent Lvs exists
+            if err != nil && !lvmutils.IsDependentLvsError(err) && !lvmutils.IsLvNotFoundError(err) {
+                log.WithError(err).WithFields(log.Fields{
+                    "vg-name": vgname,
+                    "tp-name": b.DeviceInfo.TpName,
                 }).Error("thinpool remove failed")
                 return err
             }
+
+            // Thinpool is not removed if dependent Lvs exists,
+            // activate the thinpool again
+            if err != nil && lvmutils.IsDependentLvsError(err) {
+                err = lvmutils.ActivateLV(vgname, b.DeviceInfo.TpName)
+                if err != nil {
+                    log.WithError(err).WithFields(log.Fields{
+                        "vg-name": vgname,
+                        "tp-name": b.DeviceInfo.TpName,
+                    }).Error("thinpool activate failed")
+                    return err
+                }
+            }
         }

         // Update current Vg free size
diff --git a/pkg/lvmutils/utils.go b/pkg/lvmutils/utils.go
index be2e47ea2..15780b7e9 100644
--- a/pkg/lvmutils/utils.go
+++ b/pkg/lvmutils/utils.go
@@ -149,9 +149,39 @@ func UnmountLV(mountdir string) error {
     return syscall.Unmount(mountdir, syscall.MNT_FORCE)
 }

+// IsDependentLvsError returns true if the error is related to dependent Lvs
+func IsDependentLvsError(err error) bool {
+    if err == nil {
+        return false
+    }
+    return strings.Contains(err.Error(), "dependent volume(s). Proceed? [y/n]")
+}
+
+// IsLvNotFoundError returns true if the error is related to a non-existent LV
+func IsLvNotFoundError(err error) bool {
+    if err == nil {
+        return false
+    }
+    return strings.Contains(err.Error(), "Failed to find logical volume")
+}
+
+// DeactivateLV deactivates a Logical Volume
+func DeactivateLV(vgName, lvName string) error {
+    return utils.ExecuteCommandRun("lvchange", "-a", "n", vgName+"/"+lvName)
+}
+
+// ActivateLV activates a Logical Volume
+func ActivateLV(vgName, lvName string) error {
+    return utils.ExecuteCommandRun("lvchange", "-a", "y", vgName+"/"+lvName)
+}
+
 // RemoveLV removes Logical Volume
-func RemoveLV(vgName, lvName string) error {
-    return utils.ExecuteCommandRun("lvremove", "-f", vgName+"/"+lvName)
+func RemoveLV(vgName, lvName string, force bool) error {
+    args := []string{"--autobackup", "y", vgName + "/" + lvName}
+    if force {
+        args = append(args, "-f")
+    }
+    return utils.ExecuteCommandRun("lvremove", args...)
 }

 // NumberOfLvs returns number of Lvs present in thinpool
diff --git a/plugins/device/deviceutils/store-utils.go b/plugins/device/deviceutils/store-utils.go
index f2a4bd440..30de3f3bf 100644
--- a/plugins/device/deviceutils/store-utils.go
+++ b/plugins/device/deviceutils/store-utils.go
@@ -190,7 +190,7 @@ func CheckForAvailableVgSize(expansionSize uint64, bricksInfo []brick.Brickinfo)
         if requiredDeviceSizeMap[b.MountInfo.DevicePath] > deviceSize {
             return map[string]string{}, false, nil
         }
-        brickVgMapping[b.Path] = b.VgName
+        brickVgMapping[b.Path] = b.DeviceInfo.VgName
     }
     return brickVgMapping, true, nil
 }