staging: Make db1 a rancher node

Antoine R. Dumont requested to merge make-db1-rancher-node into production

We'll keep running the staging DBs on the node as usual (no PostgreSQL service on Rancher just yet).

The goal is to add an objstorage on that node to relieve storage1's objstorage, which is almost full. We'll keep storage1's objstorage as a read-only instance and create a multiplexer objstorage so workers can still read from and write to the composed objstorage instances.
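
For context, the multiplexer setup could look roughly like the following swh.objstorage configuration. This is a sketch only: the URLs and port are hypothetical, and the exact keys for the read-only wrapping depend on the swh.objstorage version deployed.

```yaml
objstorage:
  cls: multiplexer
  objstorages:
    # New read-write objstorage on db1 (hypothetical URL)
    - cls: remote
      url: http://db1.internal.staging.swh.network:5003/
    # Existing storage1 objstorage, kept but wrapped read-only
    # (hypothetical URL; filter keys are illustrative)
    - cls: filtered
      storage_conf:
        cls: remote
        url: http://storage1.internal.staging.swh.network:5003/
      filters_conf:
        - type: readonly
```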

octo-diff
*** Running octocatalog-diff on host db1.internal.staging.swh.network
I, [2024-03-01T11:43:54.182530 #1646853]  INFO -- : Catalogs compiled for db1.internal.staging.swh.network
I, [2024-03-01T11:43:54.505642 #1646853]  INFO -- : Diffs computed for db1.internal.staging.swh.network
diff origin/production/db1.internal.staging.swh.network current/db1.internal.staging.swh.network
*******************************************
+ Exec[enforce-sysctl-value-fs.inotify.max_user_instances] =>
   parameters =>
     "command": "/sbin/sysctl -w fs.inotify.max_user_instances=8192",
     "unless": "/usr/bin/test \"$(/sbin/sysctl -n fs.inotify.max_user_instances)\...
*******************************************
+ Exec[enforce-sysctl-value-fs.inotify.max_user_watches] =>
   parameters =>
     "command": "/sbin/sysctl -w fs.inotify.max_user_watches=1048576",
     "unless": "/usr/bin/test \"$(/sbin/sysctl -n fs.inotify.max_user_watches)\" ...
*******************************************
+ Exec[enforce-sysctl-value-vm.max_map_count] =>
   parameters =>
     "command": "/sbin/sysctl -w vm.max_map_count=2097152",
     "unless": "/usr/bin/test \"$(/sbin/sysctl -n vm.max_map_count)\" = 2097152"
*******************************************
+ Exec[sysctl-fs.inotify.max_user_instances] =>
   parameters =>
     "command": "sysctl -p /etc/sysctl.d/fs.inotify.max_user_instances.conf",
     "path": [
       "/usr/sbin",
       "/sbin",
       "/usr/bin",
       "/bin"
     ],
     "refreshonly": true
*******************************************
+ Exec[sysctl-fs.inotify.max_user_watches] =>
   parameters =>
     "command": "sysctl -p /etc/sysctl.d/fs.inotify.max_user_watches.conf",
     "path": [
       "/usr/sbin",
       "/sbin",
       "/usr/bin",
       "/bin"
     ],
     "refreshonly": true
*******************************************
+ Exec[sysctl-vm.max_map_count] =>
   parameters =>
     "command": "sysctl -p /etc/sysctl.d/vm.max_map_count.conf",
     "path": [
       "/usr/sbin",
       "/sbin",
       "/usr/bin",
       "/bin"
     ],
     "refreshonly": true
*******************************************
+ Exec[update-sysctl.conf-fs.inotify.max_user_instances] =>
   parameters =>
     "command": "sed -i -e 's#^fs.inotify.max_user_instances *=.*#fs.inotify.max_...
     "onlyif": "grep -E '^fs.inotify.max_user_instances *=' /etc/sysctl.conf",
     "path": [
       "/usr/sbin",
       "/sbin",
       "/usr/bin",
       "/bin"
     ],
     "refreshonly": true
*******************************************
+ Exec[update-sysctl.conf-fs.inotify.max_user_watches] =>
   parameters =>
     "command": "sed -i -e 's#^fs.inotify.max_user_watches *=.*#fs.inotify.max_us...
     "onlyif": "grep -E '^fs.inotify.max_user_watches *=' /etc/sysctl.conf",
     "path": [
       "/usr/sbin",
       "/sbin",
       "/usr/bin",
       "/bin"
     ],
     "refreshonly": true
*******************************************
+ Exec[update-sysctl.conf-vm.max_map_count] =>
   parameters =>
     "command": "sed -i -e 's#^vm.max_map_count *=.*#vm.max_map_count = 2097152#'...
     "onlyif": "grep -E '^vm.max_map_count *=' /etc/sysctl.conf",
     "path": [
       "/usr/sbin",
       "/sbin",
       "/usr/bin",
       "/bin"
     ],
     "refreshonly": true
*******************************************
+ File[/etc/rancher/rke2/config.yaml.d/50-snaphotter.yaml] =>
   parameters =>
     "ensure": "absent"
*******************************************
+ File[/etc/rancher/rke2/config.yaml.d/50-snapshotter.yaml] =>
   parameters =>
     "content": "# File managed by puppet - modifications will be lost\nsnapshott...
     "group": "root",
     "mode": "0644",
     "owner": "root"
*******************************************
+ File[/etc/rancher/rke2/config.yaml.d] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ File[/etc/rancher/rke2] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ File[/etc/rancher] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ File[/etc/sysctl.d/99-sysctl.conf] =>
   parameters =>
     "ensure": "link",
     "group": "root",
     "owner": "root",
     "target": "../sysctl.conf"
*******************************************
+ File[/etc/sysctl.d/fs.inotify.max_user_instances.conf] =>
   parameters =>
     "content": "fs.inotify.max_user_instances = 8192\n",
     "group": "root",
     "mode": "0644",
     "notify": [
       "Exec[sysctl-fs.inotify.max_user_instances]",
       "Exec[update-sysctl.conf-fs.inotify.max_user_instances]"
     ],
     "owner": "root"
*******************************************
+ File[/etc/sysctl.d/fs.inotify.max_user_watches.conf] =>
   parameters =>
     "content": "fs.inotify.max_user_watches = 1048576\n",
     "group": "root",
     "mode": "0644",
     "notify": [
       "Exec[sysctl-fs.inotify.max_user_watches]",
       "Exec[update-sysctl.conf-fs.inotify.max_user_watches]"
     ],
     "owner": "root"
*******************************************
+ File[/etc/sysctl.d/vm.max_map_count.conf] =>
   parameters =>
     "content": "vm.max_map_count = 2097152\n",
     "group": "root",
     "mode": "0644",
     "notify": [
       "Exec[sysctl-vm.max_map_count]",
       "Exec[update-sysctl.conf-vm.max_map_count]"
     ],
     "owner": "root"
*******************************************
+ File[/etc/sysctl.d] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root",
     "purge": false,
     "recurse": false
*******************************************
+ File[/var/lib/rancher/rke2/agent/containerd/io.containerd.snapshotter.v1.zfs] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0700",
     "owner": "root"
*******************************************
+ File[/var/lib/rancher/rke2/agent/containerd] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0711",
     "owner": "root"
*******************************************
+ File[/var/lib/rancher/rke2/agent] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ File[/var/lib/rancher/rke2] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ File[/var/lib/rancher] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ Sysctl[fs.inotify.max_user_instances] =>
   parameters =>
     "enforce": true,
     "suffix": ".conf",
     "value": 8192
*******************************************
+ Sysctl[fs.inotify.max_user_watches] =>
   parameters =>
     "enforce": true,
     "suffix": ".conf",
     "value": 1048576
*******************************************
+ Sysctl[vm.max_map_count] =>
   parameters =>
     "enforce": true,
     "suffix": ".conf",
     "value": 2097152
*******************************************
+ Zfs[data/kubelet] =>
   parameters =>
     "atime": "off",
     "compression": "zstd",
     "ensure": "present",
     "mountpoint": "/var/lib/kubelet"
*******************************************
+ Zfs[data/rancher] =>
   parameters =>
     "atime": "off",
     "compression": "zstd",
     "ensure": "present",
     "mountpoint": "/var/lib/rancher"
*******************************************
+ Zfs[data/volumes] =>
   parameters =>
     "atime": "off",
     "compression": "zstd",
     "ensure": "present",
     "mountpoint": "/srv/kubernetes/volumes"
*******************************************
+ Zpool[data] =>
   parameters =>
     "ensure": "present"
*******************************************
*** End octocatalog-diff on db1.internal.staging.swh.network
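
Once the catalog is applied, the new tunables and datasets can be spot-checked on db1 with something like the following (expected values taken from the diff above):

```sh
# Kernel tunables enforced by the new Sysctl resources
sysctl -n fs.inotify.max_user_instances   # expect 8192
sysctl -n fs.inotify.max_user_watches     # expect 1048576
sysctl -n vm.max_map_count                # expect 2097152

# ZFS pool and datasets backing the Rancher node
zpool status data
zfs get compression,atime,mountpoint data/rancher data/kubelet data/volumes

# RKE2 snapshotter drop-in managed by Puppet (content truncated in the diff)
cat /etc/rancher/rke2/config.yaml.d/50-snapshotter.yaml
```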

Refs. swh/infra/sysadm-environment#5260