diff --git a/ChangeLog.rst b/ChangeLog.rst index fc5ec22205..c8bb611a97 100644 --- a/ChangeLog.rst +++ b/ChangeLog.rst @@ -9,6 +9,10 @@ Breaking changes be lost. Please see the pull-request description for manual steps required after upgrading a MetalK8s 0.1 cluster to MetalK8s 0.2 (:ghissue:`147`) +:ghpull:`94` - flatten the storage configuration and allow more user-defined +storage-related actions. Please see :ref:`upgrade_from_MetalK8s_before_0.2.0` +(:ghissue:`153`) + Release 0.1.2 (in development) ==============================
diff --git a/docs/architecture/storage.rst b/docs/architecture/storage.rst index e0d0eddceb..47e626fec6 100644 --- a/docs/architecture/storage.rst +++ b/docs/architecture/storage.rst @@ -1,2 +1,34 @@ Storage Architecture ==================== + +The current MetalK8s storage architecture relies on local storage, configured +with LVM. + +A default setup satisfying the storage needs of MetalK8s is configured +automatically, and it can easily be extended through the various configuration +items exposed by the tool. + +Glossary +######## + +* LVM PV: The LVM Physical Volume. This is the disk or the partition provided + to LVM to create the LVM Volume Group. +* LVM VG: The LVM Volume Group. This is the logical unit of LVM, aggregating + the LVM Physical Volumes into one single logical entity. +* LVM LV: A Logical Volume. This is where the filesystem will be created. + Several LVM LVs can be created on a single LVM VG. +* PV: Kubernetes Persistent Volume. This is what will be consumed by a + Persistent Volume Claim for the Kubernetes storage needs. +* PVC: Kubernetes Persistent Volume Claim. + + +Goal +#### + +MetalK8s provides a functional Kubernetes cluster with some opinionated +deployments for the monitoring and logging aspects. +These deployments require storage, but we also wanted to provide an easy way +for the end user to add their own configuration. + +As the deployment of Kubernetes on-premises is focused on dedicated hardware, +the Logical Volume Manager (LVM) has been chosen.
diff --git a/docs/usage/quickstart.rst b/docs/usage/quickstart.rst index e256ff0a4e..da8eae89ce 100644 --- a/docs/usage/quickstart.rst +++ b/docs/usage/quickstart.rst @@ -64,6 +64,27 @@ subdirectory of our inventory, we declare how to setup storage (in the default configuration) on hosts in the *kube-node* group, i.e. hosts on which Pods will be scheduled: +.. code-block:: yaml + + metalk8s_lvm_drives_vg_metalk8s: ['/dev/vdb'] + +In the above, we assume every *kube-node* host has a disk available as +:file:`/dev/vdb` which can be used to set up Kubernetes *PersistentVolumes*. For +more information about storage, see :doc:`../architecture/storage`. + +.. _upgrade_from_MetalK8s_before_0.2.0: + +Upgrading from MetalK8s < 0.2.0 +------------------------------- + +The storage configuration changed in a non-backward-compatible way in the +MetalK8s 0.2.0 release. +The old configuration will trigger an error when the playbook +:file:`playbooks/deploy.yml` is run. + + +An old configuration looking like this + .. code-block:: yaml metal_k8s_lvm: @@ -71,9 +92,45 @@ Pods will be scheduled: kubevg: drives: ['/dev/vdb'] -In the above, we assume every *kube-node* host has a disk available as -:file:`/dev/vdb` which can be used to set up Kubernetes *PersistentVolumes*. For -more information about storage, see :doc:`../architecture/storage`. +would become + +.. code-block:: yaml + + metalk8s_lvm_default_vg: False + metalk8s_lvm_vgs: ['kubevg'] + metalk8s_lvm_drives_kubevg: ['/dev/vdb'] + metalk8s_lvm_lvs_kubevg: + lv01: + size: 52G + lv02: + size: 52G + lv03: + size: 52G + lv04: + size: 11G + lv05: + size: 11G + lv06: + size: 11G + lv07: + size: 5G + lv08: + size: 5G + +A quick explanation of these new variables and why they are required: + +* metalk8s_lvm_default_vg: The value *False* ensures that all the automatic + storage configuration logic is disabled. + +* metalk8s_lvm_vgs: This is the list of the LVM VGs managed by MetalK8s. + +* metalk8s_lvm_drives_kubevg: This variable is a concatenation of the prefix + *metalk8s_lvm_drives_* and the name of the LVM VG. It is used to specify + the drives used for this LVM VG. + +* metalk8s_lvm_lvs_kubevg: This variable is a concatenation of the prefix + *metalk8s_lvm_lvs_* and the name of the LVM VG. It is used to specify + the LVM LVs created in this LVM VG. Entering the MetalK8s Shell ---------------------------
diff --git a/playbooks/storage-post.yml b/playbooks/storage-post.yml index 7c1c3d2b49..2eacc6d386 100644 --- a/playbooks/storage-post.yml +++ b/playbooks/storage-post.yml @@ -1,6 +1,5 @@ -- hosts: kube-node +- hosts: kube-master tags: - - lvm-storage - kube-pv roles: - - role: kube_lvm_storage + - role: kube_lvm_storageclass
diff --git a/playbooks/storage-pre.yml b/playbooks/storage-pre.yml index b2414dafb7..59016d688e 100644 --- a/playbooks/storage-pre.yml +++ b/playbooks/storage-pre.yml @@ -2,4 +2,5 @@ tags: - lvm-storage roles: - - role: setup_lvm + - role: setup_lvm_vg + - role: setup_lvm_lv
diff --git a/roles/kube_lvm_storage/defaults/main.yml b/roles/kube_lvm_storage/defaults/main.yml deleted file mode 100644 index 5e1ce5be02..0000000000 --- a/roles/kube_lvm_storage/defaults/main.yml +++ /dev/null @@ -1,51 +0,0 @@ -debug: False - -storage_addon_dir: '{{ kube_config_dir }}/addons/storage_lvm' - -# ################ # -# LVM confguration # -# ################ # - -# Set the storage class setup on kubernetes node -metal_k8s_storage_class: - storage_classes: - local-lvm: - is_default: true - lvm_conf: - default_fstype: 'ext4' - default_fs_force: False - default_fs_opts: '-m 0' - default_mount_opts: 'defaults' - vgs: - kubevg: - host_path: '/mnt/kubevg' - storage_class: 'local-lvm' - volumes: - lv01: - size: 52G - lv02: - size: 52G - lv03: - size: 52G - lv04: - size: 11G - lv05: - size: 11G - lv06: - size: 11G - lv07: - size: 5G - lv08: - size: 5G -# -# You can also customize a specific volume like this -# -# vgs: -# kubevg: -# volumes: -# lv01: -# size: 10G -# fstype: xfs -# fs_force: True -# fs_opts: '-m 0 -cc' -# mount_opts: 'defaults,noatime'
diff --git a/roles/kube_lvm_storage/meta/main.yml b/roles/kube_lvm_storage/meta/main.yml deleted file mode 100644 index 7a88a8d42c..0000000000 --- a/roles/kube_lvm_storage/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - role: kubespray_module
diff --git a/roles/kube_lvm_storage/tasks/main.yml b/roles/kube_lvm_storage/tasks/main.yml deleted file mode 100644 index 57bcbf2326..0000000000 --- a/roles/kube_lvm_storage/tasks/main.yml +++ /dev/null @@ -1,207 +0,0 @@ -- name: 'Check that you have mkfs for ext4 and xfs filesystem' - package: - name: '{{ item }}' - state: present - with_items: - - e2fsprogs - - xfsprogs - -- name: 'Assert all vgs have a host_path and volumes defined' - assert: - that: - - item.value.volumes|length > 0 - - item.value.host_path|default("") != "" - with_dict: '{{ metal_k8s_storage_class.lvm_conf.vgs }}' - -- name:
'create dir for lvm storage' - file: - dest: '{{ item.value.host_path }}' - state: directory - with_dict: '{{ metal_k8s_storage_class.lvm_conf.vgs }}' - -- name: display vgs in metal_k8s_storage_class - debug: - msg: '{{ metal_k8s_storage_class.lvm_conf.vgs }}' - when: debug|bool - -# -# Set a dictionary that will be used for the LVM configuration -# based on the metal_k8s_storage_class var -# -# ..code:: -# -# { -# '/dev/mapper/kubevg-lv01' : { -# 'host_path': '/mnt/kubevg', -# 'vg': 'kubevg', -# 'volume': { -# 'name': 'lv01', -# 'fstype': 'ext4', -# 'size': '10G', -# 'fs_force': False, -# 'fs_opts': "", -# 'mount_opts': "", -# }, -# } -# } -# - -- name: set fact with metal_k8s_storage_class attributes - set_fact: - metal_k8s_lvm_conf: >- - { - {%- for vg, prop in metal_k8s_storage_class.lvm_conf.vgs.items() -%} - {%- set host_path = prop.host_path -%} - {%- for volume_name, volume in prop.volumes.items() -%} - {%- set _ = volume.update( - {'fstype': volume.fstype - |default(metal_k8s_storage_class.lvm_conf.default_fstype)}) -%} - {%- set _ = volume.update( - {'fs_force': volume.fs_force - |default( - metal_k8s_storage_class.lvm_conf.default_fs_force - )}) -%} - {%- set _ = volume.update( - {'fs_opts': volume.fs_opts - |default( - metal_k8s_storage_class.lvm_conf.default_fs_opts)}) -%} - {%- set _ = volume.update( - {'mount_opts': volume.mount_opts - |default( - metal_k8s_storage_class.lvm_conf.default_mount_opts)}) -%} - {%- set device = '/dev/mapper/' ~ vg ~ '-' - ~ volume_name.replace("-", "--") -%} - '{{ device }}': {{ dict(vg=vg, - volume=volume, - volume_name=volume_name, - host_path=prop.host_path, - storage_class=prop.storage_class,) - }}, - {%- endfor -%} - {%- endfor -%} - } - -- name: display metal_k8s_lvm_conf dictionary - debug: - msg: '{{ metal_k8s_lvm_conf }}' - when: debug|bool - -- name: 'create lvm volumes with required size for each vg' - lvol: - lv: '{{ item.value.volume_name }}' - vg: '{{ item.value.vg }}' - size: '{{ item.value.volume.size }}' - state: present - shrink: False - with_dict: '{{ metal_k8s_lvm_conf }}' - -- name: 'create filesystem on each lvm volumes' - filesystem: - fstype: '{{ item.value.volume.fstype }}' - dev: '{{ item.key }}' - opts: '{{ item.value.volume.fs_opts }}' - force: '{{ item.value.volume.fs_force }}' - with_dict: '{{ metal_k8s_lvm_conf }}' - -- name: 'get UUIDs of lvm volumes' - command: blkid -s UUID -o value {{ item.key }} - check_mode: False - changed_when: False - register: metal_k8s_lvm_uuids - with_dict: '{{ metal_k8s_lvm_conf }}' - -- name: display UUIDs - debug: - msg: '{{ metal_k8s_lvm_uuids.results }}' - when: debug|bool - -# Update metal_k8s_conf with UUIDs of the filesystems -# -# ..code:: -# -# { -# '/dev/mapper/kubevg-lv01' : { -# 'host_path': '/mnt/kubevg', -# 'vg': 'kubevg', -# 'volume': { -# 'name': 'lv01', -# 'fstype': 'ext4', -# 'size': '10G', -# 'uuid': 'xxxx-yyyy' -# }, -# } -# } - -- name: update fact metal_k8s_lvm_conf with UUIDs - set_fact: - metal_k8s_lvm_conf: >- - {%- for result in metal_k8s_lvm_uuids.results|default([]) -%} - {%- set _ = metal_k8s_lvm_conf[result.item.key].volume.update({'uuid': result.stdout}) -%} - {%- endfor -%} - {{ metal_k8s_lvm_conf }} - -- name: display metal_k8s_lvm_conf dictionary with UUIDs - debug: - msg: '{{ metal_k8s_lvm_conf }}' - when: debug|bool - -- name: 'mount filesystem for each lvm volumes' - mount: - path: '{{ item.value.host_path }}/{{ item.value.volume.uuid }}' - src: UUID={{ item.value.volume.uuid }} - opts: '{{ item.value.volume.mount_opts }}' - fstype: '{{ 
item.value.volume.fstype }}' - state: mounted - with_dict: '{{ metal_k8s_lvm_conf }}' - -- name: 'Assert each LVM Vgs has a StorageClass name' - assert: - that: - - 'item.value.storage_class' - with_dict: '{{ metal_k8s_storage_class.lvm_conf.vgs }}' - -# TODO: think about moving this part (till the end) in another role -# that only target 'kube-master' group - -- name: 'create storage addon dir' - file: - path: '{{ storage_addon_dir }}' - owner: root - group: root - mode: 0755 - state: directory - recurse: true - run_once: True - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: 'Create storage-class manifests' - template: - src: local-storageclass.yml.j2 - dest: '{{ storage_addon_dir }}/storage-class-{{ item.key }}.yml' - with_dict: '{{ metal_k8s_storage_class.storage_classes }}' - run_once: True - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: 'Apply manifests for storage-class' - kube: - kubectl: '{{ bin_dir }}/kubectl' - filename: '{{ storage_addon_dir }}/storage-class-{{ item.key }}.yml' - state: 'latest' - with_dict: '{{ metal_k8s_storage_class.storage_classes }}' - run_once: True - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: 'Create pv manifests' - template: - src: local-pv.yml.j2 - dest: '{{ storage_addon_dir }}/pv-{{ item.value.volume.uuid }}.yml' - with_dict: '{{ metal_k8s_lvm_conf }}' - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: 'Apply manifests for pv' - kube: - kubectl: '{{ bin_dir }}/kubectl' - filename: '{{ storage_addon_dir }}/pv-{{ item.value.volume.uuid }}.yml' - state: 'latest' - with_dict: '{{ metal_k8s_lvm_conf }}' - delegate_to: "{{ groups['kube-master'][0] }}" diff --git a/roles/kube_lvm_storage/templates/local-pv.yml.j2 b/roles/kube_lvm_storage/templates/local-pv.yml.j2 deleted file mode 100644 index 83d8439fa8..0000000000 --- a/roles/kube_lvm_storage/templates/local-pv.yml.j2 +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: >- - {{ item.value.vg|replace("_","-") }}-{{ item.value.volume.uuid }} -spec: - capacity: - storage: '{{ item.value.volume.size|size_lvm_to_k8s }}' - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - storageClassName: '{{ item.value.storage_class }}' - local: - path: '{{ item.value.host_path }}/{{ item.value.volume.uuid }}' - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - '{{ ansible_nodename }}' diff --git a/roles/kube_lvm_storageclass/defaults/main.yml b/roles/kube_lvm_storageclass/defaults/main.yml new file mode 100644 index 0000000000..8fcdf9c355 --- /dev/null +++ b/roles/kube_lvm_storageclass/defaults/main.yml @@ -0,0 +1,6 @@ +debug: False + +metalk8s_storage_addon_dir: '{{ kube_config_dir }}/addons/storage_lvm' + +# Set default LVM VGs storageclass +metalk8s_default_storageclass: 'local-lvm' diff --git a/roles/kube_lvm_storage/filter_plugins/size_lvm_to_k8s.py b/roles/kube_lvm_storageclass/filter_plugins/size_lvm_to_k8s.py similarity index 100% rename from roles/kube_lvm_storage/filter_plugins/size_lvm_to_k8s.py rename to roles/kube_lvm_storageclass/filter_plugins/size_lvm_to_k8s.py diff --git a/roles/kube_lvm_storageclass/meta/main.yml b/roles/kube_lvm_storageclass/meta/main.yml new file mode 100644 index 0000000000..cae6819273 --- /dev/null +++ b/roles/kube_lvm_storageclass/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: kubespray_module diff --git a/roles/kube_lvm_storageclass/tasks/main.yml b/roles/kube_lvm_storageclass/tasks/main.yml new 
file mode 100644 index 0000000000..1dad530c68 --- /dev/null +++ b/roles/kube_lvm_storageclass/tasks/main.yml @@ -0,0 +1,77 @@ +# The role default variables are not present in hostvars[inventory_hostname] +# https://github.com/ansible/ansible/issues/6189 +# We therefore rely on "vars", which "works", but we are not happy about it +#- name: "Setup MetalK8s Storage: Check that VG variables are defined" +# tags: +# - assertion +# assert: +# that: +# - metalk8s_lvm_all_vgs is defined +# with_items: '{{ metalk8s_lvm_vgs }}' + +- name: 'Setup MetalK8s StorageClass: Create storage addon dir' + file: + path: '{{ metalk8s_storage_addon_dir }}' + owner: root + group: root + mode: 0755 + state: directory + recurse: true + +- name: 'Setup MetalK8s StorageClass: Create StorageClass manifests' + template: + src: local-storageclass.yml.j2 + dest: '{{ metalk8s_storage_addon_dir }}/storage-class-{{ item }}.yml' + register: metalk8s_storageclass_manifests + with_items: >- + {%- set _storageclasses = [] -%} + {%- for host in groups['kube-node'] -%} + {%- for vg_prop in hostvars[host].metalk8s_lvm_all_vgs.values() -%} + {%- set _ = _storageclasses.append(vg_prop.storageclass) -%} + {%- endfor -%} + {%- endfor -%} + {{ _storageclasses|unique }} + +- debug: + var: metalk8s_storageclass_manifests + when: debug|bool + +- name: 'Setup MetalK8s StorageClass: Apply manifests for StorageClass' + kube: + kubectl: '{{ bin_dir }}/kubectl' + filename: '{{ item.dest }}' + state: 'latest' + loop_control: + label: '{{ item.dest }}' + with_items: '{{ metalk8s_storageclass_manifests.results|default([]) }}' + run_once: True + +- name: 'Setup MetalK8s StorageClass: Create pv manifests' + template: + src: local-pv.yml.j2 + dest: '{{ metalk8s_storage_addon_dir }}/{{ item.value.host }}-pv-{{ item.value.uuid }}.yml' + register: metalk8s_persistentvolumes_manifests + loop_control: + label: '{{ item.value.uuid }}' + with_dict: >- + { + {%- for host in groups['kube-node'] -%} + {%- for lv_name, lv_prop in hostvars[host].metalk8s_lvm_all_lvs.items() -%} + '{{ host }}-{{ lv_name }}': {{ lv_prop }}, + {%- endfor -%} + {%- endfor -%} + } + +- debug: + var: metalk8s_persistentvolumes_manifests + when: debug|bool + +- name: 'Setup MetalK8s StorageClass: Apply manifests for pv' + kube: + kubectl: '{{ bin_dir }}/kubectl' + filename: '{{ item.dest }}' + state: 'latest' + loop_control: + label: '{{ item.dest }}' + with_items: '{{ metalk8s_persistentvolumes_manifests.results|default([]) }}' + run_once: True
diff --git a/roles/kube_lvm_storageclass/templates/local-pv.yml.j2 b/roles/kube_lvm_storageclass/templates/local-pv.yml.j2 new file mode 100644 index 0000000000..09d0162949 --- /dev/null +++ b/roles/kube_lvm_storageclass/templates/local-pv.yml.j2 @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: >- + {{ item.value.vg_prop.vg_name|replace("_","-") }}-{{ item.value.uuid }} + labels: {{ item.value.labels|to_json }} +spec: + capacity: + storage: '{{ item.value.size|size_lvm_to_k8s }}' + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: '{{ item.value.vg_prop.storageclass }}' + local: + path: '{{ item.value.vg_prop.host_path }}/{{ item.value.uuid }}' + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - '{{ item.value.host }}'
diff --git a/roles/kube_lvm_storage/templates/local-storageclass.yml.j2 b/roles/kube_lvm_storageclass/templates/local-storageclass.yml.j2 similarity index 57% rename from roles/kube_lvm_storage/templates/local-storageclass.yml.j2 rename to roles/kube_lvm_storageclass/templates/local-storageclass.yml.j2 index 374d48d0f2..e48e912ca4 100644 --- a/roles/kube_lvm_storage/templates/local-storageclass.yml.j2 +++ b/roles/kube_lvm_storageclass/templates/local-storageclass.yml.j2 @@ -1,9 +1,9 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: {{ item.key }} + name: {{ item }} annotations: - storageclass.kubernetes.io/is-default-class: '{{ item.value.is_default|default(False)|string|lower }}' + storageclass.kubernetes.io/is-default-class: '{{ (item == metalk8s_default_storageclass)|string|lower }}' provisioner: kubernetes.io/no-provisioner reclaimPolicy: Retain volumeBindingMode: WaitForFirstConsumer
diff --git a/roles/setup_lvm/defaults/main.yml b/roles/setup_lvm/defaults/main.yml deleted file mode 100644 index e524ce9b0c..0000000000 --- a/roles/setup_lvm/defaults/main.yml +++ /dev/null @@ -1,16 +0,0 @@ -debug: False - -# ################ # -# LVM confguration # -# ################ # - -# Specify which VG and which drive to use in host_vars for each node -metal_k8s_lvm: - vgs: - kubevg: - # The list of drives used for this LVM Volume Group. Leave empty and set - # create to 'False' if you want to use an already existing Volume Group - drives: [] - # Put this to False if you want to use - # an already existing LVM Volume Group - create: True
diff --git a/roles/setup_lvm/tasks/main.yml b/roles/setup_lvm/tasks/main.yml deleted file mode 100644 index 29d5fddc8f..0000000000 --- a/roles/setup_lvm/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ -- name: "LVM Setup: Assert {{ inventory_hostname }} has vgs correctly declared" - assert: - that: - - '(item.value.drives|length > 0 - and item.value.create|default(True)|bool) - or (item.value.drives|length == 0 - and not item.value.create|default(True)|bool)' - with_dict: '{{ metal_k8s_lvm.vgs }}' - -- name: "LVM Setup: Check LVM packages" - package: - name: lvm2 - state: present - -- name: "LVM Setup: Display metal_k8s_lvm config" - debug: - msg: '{{ metal_k8s_lvm.vgs }}' - when: debug|bool - -- name: "LVM Setup: Create the LVM Volume Groups" - lvg: - pvs: '{{ item.value.drives|join(",") }}' - vg: '{{ item.key }}' - state: present - with_dict: '{{ metal_k8s_lvm.vgs }}' - when: - - 'item.value.create|default(True)|bool'
diff --git a/roles/setup_lvm_lv/defaults/main.yml b/roles/setup_lvm_lv/defaults/main.yml new file mode 100644 index 0000000000..5877348721 --- /dev/null +++ b/roles/setup_lvm_lv/defaults/main.yml @@ -0,0 +1,38 @@ +debug: False + +# MetalK8s default LV/filesystem options +# These will be combined with any per-LV specific options +metalk8s_lvm_lv_defaults_fstype: "ext4" +metalk8s_lvm_lv_defaults_fs_opts: "-m 0" +metalk8s_lvm_lv_defaults_force: False +metalk8s_lvm_lv_defaults_mount_opts: "defaults,noatime" + +# Path prefix on the host for the mountpoint of the default LVM LVs +metalk8s_host_path_prefix: "/mnt" + +# The host_path will be determined as follows: +# {{ metalk8s_host_path_prefix }}/<name of the LVM VG> +# If you want to change it, define the variable +# metalk8s_host_path_<name of the LVM VG> +# e.g.: +# metalk8s_host_path_vg_metalk8s: '/mnt/metalk8s' + + +# Add the extra LVM LVs that you want to be managed by this role +# by specifying the following format in a variable named +# metalk8s_lvm_lvs_<name of the LVM VG>: +# lv01: +# size: 50G +# fstype: xfs +# force: True +# fs_opts: '-m 0' +# mount_opts: 'defaults,noatime' +# labels: { +# 'scality.com/mylabel': 'mycustomlabel' +# } +# lv02: +# size: 10G +# +# Only the "size" attribute is mandatory. +# The other attributes default to the values specified in the +# metalk8s_lvm_lv_defaults_* variables
diff --git a/roles/setup_lvm/meta/main.yml b/roles/setup_lvm_lv/meta/main.yml similarity index 100% rename from roles/setup_lvm/meta/main.yml rename to roles/setup_lvm_lv/meta/main.yml
diff --git a/roles/setup_lvm_lv/tasks/main.yml b/roles/setup_lvm_lv/tasks/main.yml new file mode 100644 index 0000000000..5cb6bee53b --- /dev/null +++ b/roles/setup_lvm_lv/tasks/main.yml @@ -0,0 +1,161 @@ +- name: "LVM Setup: Stat the host_path prefix" + tags: + - assertion + stat: + path: '{{ metalk8s_host_path_prefix }}' + register: metalk8s_host_path_prefix_stat + +- debug: + var: metalk8s_host_path_prefix_stat + when: debug|bool + +- name: "LVM Setup: Check that the host_path exists and is a directory" + tags: + - assertion + assert: + that: + - 'metalk8s_host_path_prefix_stat.stat.isdir' + +- name: 'LVM Setup: Install ext4 and xfs tools' + package: + name: '{{ item }}' + state: present + with_items: + - e2fsprogs + - xfsprogs + +- name: "LVM Setup: Gather facts with LVM data" + setup: + gather_subset: 'hardware' + filter: 'ansible_lvm' + +- name: 'LVM Setup: Add the default LVs to the default VG' + set_fact: + metalk8s_lvm_all_vgs: >- + {%- if metalk8s_lvm_default_vg -%} + {%- set _ = metalk8s_lvm_all_vgs[metalk8s_lvm_default_vg].pv_dict.update( + metalk8s_lvm_default_lvs) -%} + {%- endif -%} + {{ metalk8s_lvm_all_vgs }} + +- debug: + var: metalk8s_lvm_all_vgs + when: debug|bool + +- name: 'LVM Setup: Compute all LVM Logical Volumes' + set_fact: + # Combine the default properties with the ones defined on a per-LVM-LV basis + metalk8s_lvm_all_lvs: >- + { + {%- for vg_name, vg_prop in metalk8s_lvm_all_vgs.items() -%} + {%- set device_prefix = "/dev/mapper/" -%} + {%- for lv_name, lv_prop in vg_prop.pv_dict.items() -%} + {%- set lv_properties = lv_prop|combine({ + 'lv_name': lv_name, + 'vg_prop': vg_prop, + 'force': lv_prop.force|default(metalk8s_lvm_lv_defaults_force), + 'fs_opts': lv_prop.fs_opts + |default(metalk8s_lvm_lv_defaults_fs_opts), + 'fstype': lv_prop.fstype + |default(metalk8s_lvm_lv_defaults_fstype), + 'mount_opts': lv_prop.mount_opts + |default(metalk8s_lvm_lv_defaults_mount_opts), + 'host': inventory_hostname, + }) + -%} + {%- set _ = lv_properties.update({'labels': + lv_prop.labels|default({})|combine({ + 'scality.com/metalk8s_vg': lv_properties.vg_prop.vg_name, + 'scality.com/metalk8s_node': inventory_hostname, + 'scality.com/metalk8s_fstype': lv_properties.fstype, + })}) + -%} + '{{ device_prefix }}{{ vg_name }}-{{ lv_name.replace("-", "--") }}': + {{ lv_properties }}, + {%- endfor -%} + {%- endfor -%} + } + +- name: "LVM Setup: Display the computed LVM LVs" + debug: + var: metalk8s_lvm_all_lvs + when: debug|bool + +- name: "LVM Setup: Check that each volume has a size attribute and the VG exists" + tags: + - assertion + assert: + that: + - 'item.value.vg_prop.vg_name in ansible_lvm.vgs' + - '"size" in item.value' + with_dict: '{{ metalk8s_lvm_all_lvs }}' + +- name: 'LVM Setup: Create the LVM LVs with the required size in each VG' + lvol: + lv: '{{ item.lv_name }}' + vg: '{{ item.vg_prop.vg_name }}' + size: '{{ item.size }}' + state: present + shrink: False + with_items: '{{ metalk8s_lvm_all_lvs.values()|list }}' + +- name: 'LVM Setup: Create a filesystem on each LVM LV' + filesystem: + fstype: '{{ item.value.fstype }}' + dev: '{{ item.key }}' + opts: '{{ item.value.fs_opts }}' + force: '{{ item.value.force }}' + with_dict: '{{ metalk8s_lvm_all_lvs }}' + +- name: 'LVM Setup: Get UUIDs of LVM LVs' + command: blkid -s UUID -o value {{ item.key }} + check_mode: False + changed_when: False + register: metalk8s_lvm_lvs_uuids + with_dict: '{{ metalk8s_lvm_all_lvs }}' + + +# Update metalk8s_lvm_all_lvs with the UUIDs of the filesystems +# +# .. code:: +# +# { +# '/dev/mapper/vg_metalk8s-lv01' : { +# 'vg_name': 'vg_metalk8s', +# 'lv_name': 'lv01', +# 'fstype': 'ext4', +# 'size': '10G', +# 'fs_opts': '-m 0', +# 'mount_opts': 'defaults,noatime', +# 'uuid': 'xxxx-yyyy' +# }, +# } + +- name: 'LVM Setup: Update fact metalk8s_lvm_all_lvs with UUIDs' + set_fact: + metalk8s_lvm_all_lvs: >- + {%- for result in metalk8s_lvm_lvs_uuids.results|default([]) -%} + {%- set _ = metalk8s_lvm_all_lvs[result.item.key].update({ + 'uuid': result.stdout}) -%} + {%- endfor -%} + {{ metalk8s_lvm_all_lvs }} + +- name: "LVM Setup: Display LVM LV UUIDs" + debug: + var: metalk8s_lvm_lvs_uuids + when: debug|bool + +- name: 'LVM Setup: Create dir for LVM storage' + file: + dest: '{{ item.value.vg_prop.host_path }}' + state: directory + with_dict: '{{ metalk8s_lvm_all_lvs }}' + +- name: 'LVM Setup: Mount the filesystem of each LVM LV' + mount: + path: '{{ item.value.vg_prop.host_path }}/{{ item.value.uuid }}' + src: UUID={{ item.value.uuid }} + opts: '{{ item.value.mount_opts }}' + fstype: '{{ item.value.fstype }}' + state: mounted + with_dict: '{{ metalk8s_lvm_all_lvs }}'
diff --git a/roles/setup_lvm_lv/vars/main.yml b/roles/setup_lvm_lv/vars/main.yml new file mode 100644 index 0000000000..be03a314eb --- /dev/null +++ b/roles/setup_lvm_lv/vars/main.yml @@ -0,0 +1,9 @@ +# These LVM Logical Volumes will be added to the list of user-specified LVM LVs +# on the default LVM VG +metalk8s_lvm_default_lvs: + metalk8s_lv01: + size: 52G + metalk8s_lv02: + size: 5G + metalk8s_lv03: + size: 11G
diff --git a/roles/setup_lvm_vg/defaults/main.yml b/roles/setup_lvm_vg/defaults/main.yml new file mode 100644 index 0000000000..6bfbad2306 --- /dev/null +++ b/roles/setup_lvm_vg/defaults/main.yml @@ -0,0 +1,31 @@ +debug: False + +# ################# # +# LVM configuration # +# ################# # + +# Specify which VG and which drives to use in host_vars for each node +# This is the default LVM VG created for MetalK8s needs +metalk8s_lvm_default_vg: "vg_metalk8s" + +# The list of drives used for this LVM Volume Group: +# * an empty drives list means that the VG already exists +# * a list of drives means these devices will be used for the LVM VG +# +# Drives can be either raw devices or partitions. In case of raw devices, +# a check that no lingering partition exists will be done before creating the +# VG on the drive(s). +# You can also leave this variable undefined; in that case the existence of +# the Volume Group will be checked and an error will occur if it is absent. +# To specify the drives, you need a variable including the name of the +# Volume Group, like metalk8s_lvm_drives_<name of the LVM VG> +# e.g.: +# metalk8s_lvm_drives_vg_metalk8s: ['/dev/sdb', '/dev/sdc2'] +# Check the documentation #TODO insert link to doc for further information + +# If you need an extra LVM VG, add the name of the Volume Group +# to the following variable +metalk8s_lvm_vgs: ['vg_metalk8s'] + +# This is the StorageClass name of the default VG +metalk8s_default_storageclass: 'local-lvm'
diff --git a/roles/setup_lvm_vg/meta/main.yml b/roles/setup_lvm_vg/meta/main.yml new file mode 100644 index 0000000000..32cf5dda7e --- /dev/null +++ b/roles/setup_lvm_vg/meta/main.yml @@ -0,0 +1 @@ +dependencies: []
diff --git a/roles/setup_lvm_vg/tasks/main.yml b/roles/setup_lvm_vg/tasks/main.yml new file mode 100644 index 0000000000..75afaac203 --- /dev/null +++ b/roles/setup_lvm_vg/tasks/main.yml @@ -0,0 +1,75 @@ +# TODO: link to documentation in fail message +- name: "LVM Setup: Detect old configuration style < 0.2.0" + tags: + - assertion + fail: + msg: > + You are still using the old storage configuration. + A breaking change was introduced in MetalK8s 0.2.0 and the default + LVM Volume Group has been changed from "kubevg" to + {{ metalk8s_lvm_default_vg }}. + Please follow the "Upgrading from MetalK8s < 0.2.0" chapter of the + documentation. + when: metal_k8s_lvm is defined + +- name: "LVM Setup: Check LVM packages" + package: + name: '{{ item }}' + state: present + register: lvm_just_installed + with_items: + - lvm2 + +- name: "LVM Setup: Re-compute facts now that LVM is installed" + setup: + when: lvm_just_installed is changed + +- name: "LVM Setup: Check that the default VG is in the list of managed VGs" + tags: + - assertion + assert: + that: + - not(metalk8s_lvm_default_vg|bool) or metalk8s_lvm_default_vg in metalk8s_lvm_vgs + +- name: 'LVM Setup: Compute the list of all VGs' + set_fact: + metalk8s_lvm_all_vgs: >- + { + {%- for vg_name in metalk8s_lvm_vgs -%} + '{{ vg_name }}': {{ dict( + drives=vars['metalk8s_lvm_drives_' ~ vg_name]|default([]), + host_path=vars['metalk8s_host_path_' ~ vg_name]|default( + metalk8s_host_path_prefix ~ "/" ~ vg_name), + pv_dict=vars['metalk8s_lvm_lvs_' ~ vg_name]|default({}), + storageclass=vars['metalk8s_lvm_storageclass_' ~ vg_name] + |default(metalk8s_default_storageclass), + vg_name=vg_name, + ) }}, + {%- endfor -%} + } + +- debug: + var: metalk8s_lvm_all_vgs + when: debug|bool + +# drives|length > 0 means we create the VG +# drives|length == 0 means the VG must exist +# - not(item.value.drives|length == 0 and item.key not in ansible_lvm.vgs) + + # TODO: Check that the devices exist + # TODO: Check that the devices do not have any ansible_device_links if the + # VG is to be created + # TODO: Check that the devices of an existing VG are the same if drives are specified + +- name: "LVM Setup: Create the LVM Volume Groups" + lvg: + pvs: '{{ item.value.drives|join(",") }}' + vg: '{{ item.key }}' + state: present + with_dict: >- + { + {%- for vg_name, vg_prop in metalk8s_lvm_all_vgs.items() + if vg_prop.drives -%} + '{{ vg_name }}': {{ vg_prop }}, + {%- endfor -%} + }
diff --git a/tests/single-node/inventory/group_vars/k8s-cluster/single_node.yml b/tests/single-node/inventory/group_vars/k8s-cluster/single_node.yml index 31467a807e..f220404df5
100644 --- a/tests/single-node/inventory/group_vars/k8s-cluster/single_node.yml +++ b/tests/single-node/inventory/group_vars/k8s-cluster/single_node.yml @@ -1,26 +1,8 @@ - -metal_k8s_lvm: - vgs: - kubevg: - drives: ['/dev/loop0'] - -metal_k8s_storage_class: - storage_classes: - local-lvm: - is_default: true - lvm_conf: - default_fstype: 'ext4' - default_fs_force: False - default_fs_opts: '-m 0' - default_mount_opts: 'defaults' - vgs: - kubevg: - host_path: '/mnt/kubevg' - storage_class: 'local-lvm' - volumes: - lv01: - size: 11G - lv02: - size: 8G +metalk8s_lvm_drives_vg_metalk8s: ['/dev/loop0'] +metalk8s_lvm_lvs_vg_metalk8s: + lv01: + size: 11G + lv02: + size: 8G metalk8s_elasticsearch_enabled: False diff --git a/tests/single-node/test.sh b/tests/single-node/test.sh index b33e5a447e..5b5720b8f8 100644 --- a/tests/single-node/test.sh +++ b/tests/single-node/test.sh @@ -15,7 +15,7 @@ die() { setup_suite() { echo "Creating loopback block-device and configuring VM" - sudo truncate -s 20G /kube-lvm || die + sudo truncate -s 256G /kube-lvm || die sudo losetup /dev/loop0 /kube-lvm || die echo "Disabling iptables"
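For reference, a possible ``host_vars`` sketch that combines the variables introduced by this change to manage an extra Volume Group next to the default ``vg_metalk8s``. Only the variable naming scheme comes from the roles above; the VG name ``vg_data``, the device path and the sizes are illustrative assumptions:

.. code-block:: yaml

   # Keep the default VG and declare one extra, user-managed VG
   metalk8s_lvm_vgs: ['vg_metalk8s', 'vg_data']
   # Drives used to create the extra VG (illustrative device path)
   metalk8s_lvm_drives_vg_data: ['/dev/vdc']
   # Optional: override the mountpoint instead of the computed
   # {{ metalk8s_host_path_prefix }}/vg_data
   metalk8s_host_path_vg_data: '/mnt/vg_data'
   # LVs to create in the extra VG; only "size" is mandatory, the other
   # attributes fall back to the metalk8s_lvm_lv_defaults_* values
   metalk8s_lvm_lvs_vg_data:
     lv01:
       size: 100G
       fstype: xfs
     lv02:
       size: 100G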
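For one such LV, :file:`roles/kube_lvm_storageclass/templates/local-pv.yml.j2` would render a PersistentVolume along these lines; the filesystem UUID, the node name and the exact capacity string produced by the ``size_lvm_to_k8s`` filter are illustrative assumptions:

.. code-block:: yaml

   apiVersion: v1
   kind: PersistentVolume
   metadata:
     # <VG name with "_" replaced by "-">-<filesystem UUID>
     name: vg-data-f0e1d2c3-4b5a-6978-8796-a5b4c3d2e1f0
     labels:
       scality.com/metalk8s_vg: vg_data
       scality.com/metalk8s_node: node-01
       scality.com/metalk8s_fstype: xfs
   spec:
     capacity:
       storage: 100Gi   # assuming size_lvm_to_k8s maps the LVM "100G" notation
     accessModes:
       - ReadWriteOnce
     persistentVolumeReclaimPolicy: Retain
     storageClassName: local-lvm
     local:
       # <host_path of the VG>/<filesystem UUID>, i.e. where setup_lvm_lv mounted it
       path: /mnt/vg_data/f0e1d2c3-4b5a-6978-8796-a5b4c3d2e1f0
     nodeAffinity:
       required:
         nodeSelectorTerms:
           - matchExpressions:
               - key: kubernetes.io/hostname
                 operator: In
                 values:
                   - node-01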