Skip to content
This repository has been archived by the owner on Mar 26, 2020. It is now read-only.

Commit

Permalink
Merge branch 'master' into vol-stop-socket-connect
Browse files Browse the repository at this point in the history
  • Loading branch information
Atin Mukherjee authored Jan 8, 2019
2 parents 6b2e269 + c896a94 commit bb1ba6b
Show file tree
Hide file tree
Showing 9 changed files with 153 additions and 33 deletions.
65 changes: 65 additions & 0 deletions e2e/smartvol_ops_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -510,6 +510,69 @@ func testSmartVolumeAutoDistributeDisperse(t *testing.T) {
checkZeroLvs(r)
}

// testSmartVolumeWhenCloneExists verifies that deleting a smart volume does
// not remove the shared thinpool while a clone created from one of its
// snapshots still exists, and that all LVs are gone once the clone is deleted.
func testSmartVolumeWhenCloneExists(t *testing.T) {
	r := require.New(t)

	smartvolname := "svol1"

	// Provision a 3-brick replicated smart volume.
	volinfo, err := client.VolumeCreate(api.VolCreateReq{
		Name:         smartvolname,
		Size:         20 * gutils.MiB,
		ReplicaCount: 3,
	})
	r.Nil(err)

	r.Len(volinfo.Subvols, 1)
	r.Equal("Replicate", volinfo.Type.String())
	r.Len(volinfo.Subvols[0].Bricks, 3)

	r.Nil(client.VolumeStart(smartvolname, false))

	// Snapshot Create, Activate, Clone and delete Snapshot
	snapName := smartvolname + "-s1"
	_, err = client.SnapshotCreate(api.SnapCreateReq{
		VolName:  smartvolname,
		SnapName: snapName,
	})
	r.Nil(err, "snapshot create failed")

	r.Nil(client.SnapshotActivate(api.SnapActivateReq{}, snapName))

	_, err = client.SnapshotClone(snapName, api.SnapCloneReq{
		CloneName: smartvolname + "-c1",
	})
	r.Nil(err, "snapshot clone failed")

	r.Nil(client.SnapshotDelete(snapName))

	// Check number of Lvs: Thinpool + brick + Clone volume's brick
	nlv, err := numberOfLvs("gluster-dev-gluster_loop1")
	r.Nil(err)
	r.Equal(3, nlv)

	r.Nil(client.VolumeStop(smartvolname))

	r.Nil(client.VolumeDelete(smartvolname))

	// After the parent volume is deleted the thinpool must survive
	// because the clone's brick still depends on it: Thinpool + Clone brick
	nlv, err = numberOfLvs("gluster-dev-gluster_loop1")
	r.Nil(err)
	r.Equal(2, nlv)

	// Delete Clone Volume; with no dependent LVs left, everything is freed
	r.Nil(client.VolumeDelete(smartvolname + "-c1"))

	checkZeroLvs(r)
}

func editDevice(t *testing.T) {
r := require.New(t)
peerList, err := client.Peers()
Expand Down Expand Up @@ -632,6 +695,8 @@ func TestSmartVolume(t *testing.T) {
t.Run("Smartvol Distributed-Disperse Volume", testSmartVolumeDistributeDisperse)
t.Run("Smartvol Auto Distributed-Replicate Volume", testSmartVolumeAutoDistributeReplicate)
t.Run("Smartvol Auto Distributed-Disperse Volume", testSmartVolumeAutoDistributeDisperse)
// Test dependent lvs in thinpool cases
t.Run("Smartvol delete when clone exists", testSmartVolumeWhenCloneExists)
t.Run("Replace Brick", testReplaceBrick)
t.Run("Edit device", editDevice)

Expand Down
11 changes: 9 additions & 2 deletions glusterd2/brick/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@ type MountInfo struct {
MntOpts string
}

// DeviceInfo is used to store brick device information
type DeviceInfo struct {
	// TpName is the LVM thinpool the brick LV is carved from. Empty when the
	// volume was manually provisioned (no thinpool info is recorded then).
	TpName string
	// LvName is the logical volume backing the brick.
	LvName string
	// VgName is the LVM volume group that holds the LV and thinpool.
	VgName string
	// RootDevice is the underlying device — presumably the device the VG was
	// created on; confirm against the device plugin before relying on it.
	RootDevice string
}

// Brickinfo is the static information about the brick
type Brickinfo struct {
ID uuid.UUID
Expand All @@ -44,9 +52,8 @@ type Brickinfo struct {
Type Type
Decommissioned bool
PType ProvisionType
VgName string
RootDevice string
MountInfo
DeviceInfo
}

// SizeInfo represents sizing information.
Expand Down
3 changes: 3 additions & 0 deletions glusterd2/commands/snapshot/snapshot-create.go
Original file line number Diff line number Diff line change
Expand Up @@ -500,6 +500,9 @@ func createSnapSubvols(newVolinfo, origVolinfo *volume.Volinfo, nodeData map[str
DevicePath: mountData.DevicePath,
FsType: mountData.FsType,
MntOpts: mountData.MntOpts,
VgName: brickinfo.DeviceInfo.VgName,
RootDevice: brickinfo.DeviceInfo.RootDevice,
TpName: brickinfo.DeviceInfo.TpName,
}

bricks = append(bricks, brick)
Expand Down
1 change: 1 addition & 0 deletions glusterd2/commands/snapshot/snapshot-restore.go
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@ func createVolumeBrickFromSnap(bricks []brick.Brickinfo, vol *volume.Volinfo) []
Hostname: b.Hostname,
ID: b.ID,
MountInfo: b.MountInfo,
DeviceInfo: b.DeviceInfo,
Path: b.Path,
PeerID: b.PeerID,
Type: b.Type,
Expand Down
4 changes: 2 additions & 2 deletions glusterd2/commands/volumes/volume-smartvol-txn.go
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
}

// Remove LV
err = lvmutils.RemoveLV(b.VgName, b.LvName)
err = lvmutils.RemoveLV(b.VgName, b.LvName, true)
if err != nil {
c.Logger().WithError(err).WithFields(log.Fields{
"vg-name": b.VgName,
Expand All @@ -145,7 +145,7 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
}

// Remove Thin Pool
err = lvmutils.RemoveLV(b.VgName, b.TpName)
err = lvmutils.RemoveLV(b.VgName, b.TpName, true)
if err != nil {
c.Logger().WithError(err).WithFields(log.Fields{
"vg-name": b.VgName,
Expand Down
9 changes: 7 additions & 2 deletions glusterd2/volume/struct.go
Original file line number Diff line number Diff line change
Expand Up @@ -175,8 +175,13 @@ func NewBrickEntries(bricks []api.BrickReq, volName, volfileID string, volID uui
binfo.VolfileID = volfileID
binfo.VolumeID = volID
binfo.ID = uuid.NewRandom()
binfo.VgName = b.VgName
binfo.RootDevice = b.RootDevice

binfo.DeviceInfo = brick.DeviceInfo{
LvName: b.LvName,
TpName: b.TpName,
VgName: b.VgName,
RootDevice: b.RootDevice,
}

binfo.PType = ptype
if ptype.IsAutoProvisioned() || ptype.IsSnapshotProvisioned() {
Expand Down
57 changes: 33 additions & 24 deletions glusterd2/volume/volume-utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -286,18 +286,11 @@ func CleanBricks(volinfo *Volinfo) error {
}
vgname := parts[2]
lvname := parts[3]
tpname, err := lvmutils.GetThinpoolName(vgname, lvname)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"vg-name": vgname,
"lv-name": lvname,
}).Error("failed to get thinpool name")
return err
}

// Remove LV
err = lvmutils.RemoveLV(vgname, lvname)
if err != nil {
err = lvmutils.RemoveLV(vgname, lvname, true)
// Ignore error if LV not exists
if err != nil && !lvmutils.IsLvNotFoundError(err) {
log.WithError(err).WithFields(log.Fields{
"vg-name": vgname,
"lv-name": lvname,
Expand All @@ -309,26 +302,42 @@ func CleanBricks(volinfo *Volinfo) error {
continue
}

// Remove Thin Pool if LV count is zero, Thinpool will
// have more LVs in case of snapshots and clones
numLvs, err := lvmutils.NumberOfLvs(vgname, tpname)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"vg-name": vgname,
"tp-name": tpname,
}).Error("failed to get number of lvs")
return err
}

if numLvs == 0 {
err = lvmutils.RemoveLV(vgname, tpname)
// Thinpool info will not be available if Volume is manually provisioned
// or a volume is cloned from a manually provisioned volume
if b.DeviceInfo.TpName != "" {
err = lvmutils.DeactivateLV(vgname, b.DeviceInfo.TpName)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"vg-name": vgname,
"tp-name": tpname,
"tp-name": b.DeviceInfo.TpName,
}).Error("thinpool deactivate failed")
return err
}

err = lvmutils.RemoveLV(vgname, b.DeviceInfo.TpName, false)
// Do not remove Thinpool if the dependent Lvs exists
// Ignore the lvremove command failure if the reason is
// dependent Lvs exists
if err != nil && !lvmutils.IsDependentLvsError(err) && !lvmutils.IsLvNotFoundError(err) {
log.WithError(err).WithFields(log.Fields{
"vg-name": vgname,
"tp-name": b.DeviceInfo.TpName,
}).Error("thinpool remove failed")
return err
}

// Thinpool is not removed if dependent Lvs exists,
// activate the thinpool again
if err != nil && lvmutils.IsDependentLvsError(err) {
err = lvmutils.ActivateLV(vgname, b.DeviceInfo.TpName)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"vg-name": vgname,
"tp-name": b.DeviceInfo.TpName,
}).Error("thinpool activate failed")
return err
}
}
}

// Update current Vg free size
Expand Down
34 changes: 32 additions & 2 deletions pkg/lvmutils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -149,9 +149,39 @@ func UnmountLV(mountdir string) error {
return syscall.Unmount(mountdir, syscall.MNT_FORCE)
}

// IsDependentLvsError returns true if the error related to dependent Lvs exists
func IsDependentLvsError(err error) bool {
if err == nil {
return false
}
return strings.Contains(err.Error(), "dependent volume(s). Proceed? [y/n]")
}

// IsLvNotFoundError returns true if the error is related to non existent LV error
func IsLvNotFoundError(err error) bool {
if err == nil {
return false
}
return strings.Contains(err.Error(), "Failed to find logical volume")
}

// DeactivateLV deactivates a Logical Volume by running
// `lvchange -a n <vg>/<lv>`; returns the command's error, if any.
func DeactivateLV(vgName, lvName string) error {
	lvPath := vgName + "/" + lvName
	return utils.ExecuteCommandRun("lvchange", "-a", "n", lvPath)
}

// ActivateLV activates a Logical Volume by running
// `lvchange -a y <vg>/<lv>`; returns the command's error, if any.
func ActivateLV(vgName, lvName string) error {
	lvPath := vgName + "/" + lvName
	return utils.ExecuteCommandRun("lvchange", "-a", "y", lvPath)
}

// RemoveLV removes Logical Volume
func RemoveLV(vgName, lvName string) error {
return utils.ExecuteCommandRun("lvremove", "-f", vgName+"/"+lvName)
func RemoveLV(vgName, lvName string, force bool) error {
args := []string{"--autobackup", "y", vgName + "/" + lvName}
if force {
args = append(args, "-f")
}
return utils.ExecuteCommandRun("lvremove", args...)
}

// NumberOfLvs returns number of Lvs present in thinpool
Expand Down
2 changes: 1 addition & 1 deletion plugins/device/deviceutils/store-utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ func CheckForAvailableVgSize(expansionSize uint64, bricksInfo []brick.Brickinfo)
if requiredDeviceSizeMap[b.MountInfo.DevicePath] > deviceSize {
return map[string]string{}, false, nil
}
brickVgMapping[b.Path] = b.VgName
brickVgMapping[b.Path] = b.DeviceInfo.VgName
}

return brickVgMapping, true, nil
Expand Down

0 comments on commit bb1ba6b

Please sign in to comment.