Browse Source

new release and S3

Pierre-Yves Barriat 1 year ago
parent
commit
2054f5a5c0
67 changed files with 2513 additions and 147 deletions
  1. 25 0
      dev/README.md
  2. 41 7
      dev/Vagrantfile
  3. 17 0
      dev/provisioning/ansible/ceph.yml
  4. 4 1
      dev/provisioning/ansible/database.yml
  5. 2 2
      dev/provisioning/ansible/essai.yml
  6. 7 0
      dev/provisioning/ansible/hosts_openstack
  7. 9 4
      dev/provisioning/ansible/nextcloud.yml
  8. 28 0
      dev/provisioning/ansible/nextcloud_cism.yml
  9. 1 0
      dev/provisioning/ansible/playbook.yml
  10. 24 0
      dev/provisioning/ansible/roles/ceph/defaults/main.yml
  11. 147 0
      dev/provisioning/ansible/roles/ceph/tasks/main.yml
  12. 27 0
      dev/provisioning/ansible/roles/ceph/tasks/setup/RedHat.yml
  13. 23 0
      dev/provisioning/ansible/roles/ceph/tasks/setup/Rocky.yml
  14. 13 0
      dev/provisioning/ansible/roles/ceph/tasks/setup/Suse.yml
  15. 69 0
      dev/provisioning/ansible/roles/ceph/tasks/setup/Ubuntu.yml
  16. 15 0
      dev/provisioning/ansible/roles/ceph/templates/radosgw.yml.j2
  17. 23 0
      dev/provisioning/ansible/roles/ceph/vars/Ubuntu.yml
  18. 2 2
      dev/provisioning/ansible/roles/haproxy/tasks/main.yml
  19. 2 2
      dev/provisioning/ansible/roles/keepalived/tasks/main.yml
  20. 3 0
      dev/provisioning/ansible/roles/mariadb/defaults/main.yml
  21. 12 9
      dev/provisioning/ansible/roles/mariadb/tasks/config/secure-installation.yml
  22. 1 1
      dev/provisioning/ansible/roles/mariadb/tasks/config/secure.yml
  23. 13 0
      dev/provisioning/ansible/roles/mariadb/tasks/config/template.yml
  24. 2 2
      dev/provisioning/ansible/roles/mariadb/tasks/main.yml
  25. 0 6
      dev/provisioning/ansible/roles/mariadb/tasks/setup/RedHat.yml
  26. 0 6
      dev/provisioning/ansible/roles/mariadb/tasks/setup/Rocky.yml
  27. 0 6
      dev/provisioning/ansible/roles/mariadb/tasks/setup/Suse.yml
  28. 2 0
      dev/provisioning/ansible/roles/mariadb/templates/server.j2
  29. 0 1
      dev/provisioning/ansible/roles/mariadb/vars/RedHat.yml
  30. 0 1
      dev/provisioning/ansible/roles/mariadb/vars/Rocky.yml
  31. 36 25
      dev/provisioning/ansible/roles/nextcloud/defaults/main.yml
  32. 243 0
      dev/provisioning/ansible/roles/nextcloud/files/NextcloudBackup.sh
  33. 269 0
      dev/provisioning/ansible/roles/nextcloud/files/NextcloudRestore.sh
  34. BIN
      dev/provisioning/ansible/roles/nextcloud/files/pexels-jaymantri-5439.jpg
  35. 20 14
      dev/provisioning/ansible/roles/nextcloud/tasks/main.yml
  36. 8 0
      dev/provisioning/ansible/roles/nextcloud/tasks/nc_ceph.yml
  37. 0 0
      dev/provisioning/ansible/roles/nextcloud/tasks/nc_gluster.yml
  38. 0 49
      dev/provisioning/ansible/roles/nextcloud/tasks/nc_multiple.yml
  39. 30 0
      dev/provisioning/ansible/roles/nextcloud/tasks/nc_setup.yml
  40. 61 0
      dev/provisioning/ansible/roles/nextcloud/templates/NextcloudBackupRestore.conf.j2
  41. 17 0
      dev/provisioning/ansible/roles/nextcloud/templates/objectstore_s3_config.php.j2
  42. 0 1
      dev/provisioning/ansible/roles/nextcloud/templates/redis.config.php.j2
  43. 21 0
      dev/provisioning/ansible/roles/nextcloud/vars/ldap_CISM.yml
  44. 21 0
      dev/provisioning/ansible/roles/nextcloud/vars/main.yml
  45. 5 0
      dev/provisioning/ansible/roles/nfs/defaults/main.yml
  46. 3 0
      dev/provisioning/ansible/roles/nfs/handlers/main.yml
  47. 36 0
      dev/provisioning/ansible/roles/nfs/tasks/main.yml
  48. 7 0
      dev/provisioning/ansible/roles/nfs/tasks/setup/Debian.yml
  49. 9 0
      dev/provisioning/ansible/roles/nfs/tasks/setup/RedHat.yml
  50. 9 0
      dev/provisioning/ansible/roles/nfs/tasks/setup/Rocky.yml
  51. 13 0
      dev/provisioning/ansible/roles/nfs/templates/exports.j2
  52. 2 0
      dev/provisioning/ansible/roles/nfs/vars/Debian.yml
  53. 2 0
      dev/provisioning/ansible/roles/nfs/vars/Fedora.yml
  54. 2 0
      dev/provisioning/ansible/roles/nfs/vars/RedHat.yml
  55. 2 0
      dev/provisioning/ansible/roles/nfs/vars/Rocky.yml
  56. 2 2
      dev/provisioning/ansible/roles/proxysql/tasks/main.yml
  57. 1 1
      dev/provisioning/ansible/roles/redis/tasks/main.yml
  58. 3 2
      dev/provisioning/ansible/roles/web_php/tasks/main.yml
  59. 1 0
      dev/provisioning/ansible/roles/web_php/tasks/web/Rocky.yml
  60. 3 3
      dev/provisioning/bash/common.sh
  61. 18 0
      dev/provisioning/bash/install-docker.sh
  62. 15 0
      dev/tools/box-metadata.json
  63. 86 0
      dev/tools/nextcloud-S3-migration/README.md
  64. 895 0
      dev/tools/nextcloud-S3-migration/localtos3.php
  65. 144 0
      dev/tools/nextcloud-S3-migration/s3_test.php
  66. 17 0
      dev/tools/nextcloud-S3-migration/storage.config.php
  67. 0 0
      report/compile.sh

+ 25 - 0
dev/README.md

@@ -16,14 +16,21 @@ sudo apt install virtualbox
 ### Install vagrant & ansible
 
 ```bash
+sudo apt update
+sudo apt install software-properties-common
+sudo add-apt-repository --yes --update ppa:ansible/ansible
 sudo apt install ansible
 
 wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
 echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
 sudo apt update && sudo apt install vagrant
 
+vagrant plugin install vagrant-vbguest
+
 vagrant plugin install vagrant-hostmanager
 
+vagrant plugin install vagrant-disksize
+
 ansible-galaxy collection install ansible.posix
 
 ansible-galaxy collection install community.crypto
@@ -31,6 +38,14 @@ ansible-galaxy collection install community.crypto
 ansible-galaxy collection install community.general
 
 ansible-galaxy collection install community.mysql
+
+ansible-galaxy collection install community.aws
+```
+
+> If you are using `ceph`, run:
+```bash
+export VAGRANT_EXPERIMENTAL="disks"
+vagrant reload
 ```
 
 ## Deploy
@@ -168,3 +183,13 @@ TODO
 HOSTS = [
 ]
 ```
+
+## Apply Ansible on OpenStack
+
+Examples:
+
+```bash
+ansible -v -i '192.168.64.68,' --key-file /home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro -u rocky -b -m setup all
+
+ansible-playbook -v -i provisioning/ansible/hosts_openstack -b provisioning/ansible/nextcloud_aio.yml
+```

+ 41 - 7
dev/Vagrantfile

@@ -37,16 +37,24 @@ HOSTS = [
 ]
 
 HOSTS = [
-  { :hostname => "db1",         :ip => NETWORK+"11",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
+  { :hostname => "db1",         :ip => NETWORK+"11",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers",     :synced_folder => "/backup/pelican/backup"},
   #{ :hostname => "db2",         :ip => NETWORK+"12",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
   #{ :hostname => "db3",         :ip => NETWORK+"13",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
   #{ :hostname => "lbsql1",      :ip => NETWORK+"19",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_lbal_servers", :state => "MASTER",  :priority => 101, :vip => NETWORK+"20" },
   #{ :hostname => "lbsql2",      :ip => NETWORK+"18",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_lbal_servers", :state => "BACKUP",  :priority => 100, :vip => NETWORK+"20" },
-  { :hostname => "gl1",         :ip => NETWORK+"31",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "gluster_servers"  },
-  { :hostname => "gl2",         :ip => NETWORK+"32",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "gluster_servers"  },
-  { :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :ipdb => NETWORK+"20", :redisd => "keydb", :redisp => "6380", :redisv => NETWORK+"40", :priority => 101 },
-  { :hostname => "web2.test",   :ip => NETWORK+"42",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :ipdb => NETWORK+"20", :redisd => "keydb", :redisp => "6380", :redisv => NETWORK+"40", :priority => 100 },
-  { :hostname => "lb.test",     :ip => NETWORK+"51",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "lbal_servers"     },
+  #{ :hostname => "gl1",         :ip => NETWORK+"31",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "gluster_servers"  },
+  #{ :hostname => "gl2",         :ip => NETWORK+"32",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "gluster_servers"  },
+  #{ :hostname => "ceph1",       :ip => NETWORK+"71",  :ram => 3072,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers",    :disk_extra => "10GB",  :disk_name => "ceph_storage_extra" },
+  #{ :hostname => "ceph2",       :ip => NETWORK+"72",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers",    :disk_extra => "10GB",  :disk_name => "ceph_storage" },
+  #{ :hostname => "ceph3",       :ip => NETWORK+"73",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers",    :disk_extra => "10GB",  :disk_name => "ceph_storage" },
+  { :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 3072,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers"      },
+  #{ :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :synced_folder => "/backup/pelican"},
+  #{ :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :ipdb => NETWORK+"20", :redisd => "keydb", :redisp => "6380", :redisv => NETWORK+"40", :priority => 101 },
+  #{ :hostname => "web2.test",   :ip => NETWORK+"42",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :ipdb => NETWORK+"20", :redisd => "keydb", :redisp => "6380", :redisv => NETWORK+"40", :priority => 100 },
+  #{ :hostname => "lb.test",     :ip => NETWORK+"51",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "lbal_servers"     },
+  { :hostname => "ceph1",       :ip => NETWORK+"71",  :ram => 3072,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers"     },
+  { :hostname => "ceph2",       :ip => NETWORK+"72",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers"     },
+  { :hostname => "ceph3",       :ip => NETWORK+"73",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers"     },
 ]
 
 # Defined ansible playbook
@@ -210,7 +218,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       # Set system options
       cpu = server[:cpu] ? server[:cpu] : 1;
       memory = server[:ram] ? server[:ram] : 512;
+      if server[:group] == "db_servers"
+        conf.disksize.size = '50GB'
+      end
       name = server[:hostname] ? server[:hostname] : "linux";
+      conf.vm.synced_folder ".", "/vagrant", disabled: true
       conf.vm.provider "virtualbox" do |vbox|
         vbox.cpus   = cpu.to_s
         vbox.memory = memory.to_s
@@ -218,6 +230,28 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         if server[:box] == "rockylinux/8"
           vbox.customize ["modifyvm", :id, "--firmware", "efi"]
         end
+        #if server[:group] != "web_servers"
+          if Vagrant.has_plugin?("vagrant-vbguest")
+            conf.vbguest.auto_update = false
+          end
+        #end
+      end
+      if !server[:disk_extra].nil? && !server[:disk_name].nil?
+        disk = server[:disk_extra] ? server[:disk_extra] : "10GB";
+        dname = server[:disk_name] ? server[:disk_name] : "extra_storage"
+        conf.vm.disk :disk, size: disk.to_s, name: dname.to_s
+      end
+      if !server[:synced_folder].nil?
+        host_folder = server[:synced_folder]
+        #if server[:group] == "web_servers"
+        #  guest_folder = "/external"
+        #  conf.vm.network "forwarded_port", guest: 636, host: 636
+        #  conf.vm.synced_folder host_folder.to_s, guest_folder.to_s, mount_options: ["uid=48", "gid=48"]
+        #end
+        if server[:group] == "db_servers"
+          guest_folder = "/external"
+          conf.vm.synced_folder host_folder.to_s, guest_folder.to_s, mount_options: ["uid=993", "gid=989"]
+        end
       end
       # Set network options
       netmask = server[:netmask] || NETMASK
@@ -234,7 +268,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       conf.vm.provision "shell" do |s|
         s.path = "provisioning/bash/common.sh"
         s.args = [server[:box]]
-      end     
+      end
       # Provision nodes with Ansible.
       # The index used here in order to execute the provision just after all
       # the servers are up and running.

+ 17 - 0
dev/provisioning/ansible/ceph.yml

@@ -0,0 +1,17 @@
+---
+- name: apply ceph role
+  collections:
+    - community.aws
+  hosts: ceph_servers
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  pre_tasks:
+    - name: define ansible_python_interpreter group // linux distribution
+      set_fact:
+        ansible_python_interpreter: /usr/bin/python2
+      when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
+  roles:
+    - role: ceph
+      #vars:
+      #  rgw_frontend_port: 443
+      #  rgw_ssl: true

+ 4 - 1
dev/provisioning/ansible/database.yml

@@ -11,4 +11,7 @@
         ansible_python_interpreter: /usr/bin/python2
       when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
   roles:
-    - { role: mariadb }
+    - role: mariadb
+      vars:
+        mariadb_datadir: "/extent/mysql"
+        mariadb_socket: "/extent/mysql/mysql.sock"

+ 2 - 2
dev/provisioning/ansible/essai.yml

@@ -14,8 +14,8 @@
   roles:
     #- role: redis
     #  when: redis_daemon == "redis" or redis_daemon == "keydb"
-    - role: web_php
+    #- role: web_php
     #  vars:
     #    enable_php_fpm: false
     - role: mariadb
-    - role: nextcloud
+    #- role: nextcloud

+ 7 - 0
dev/provisioning/ansible/hosts_openstack

@@ -0,0 +1,7 @@
+nc-web-1.cism.ucl.ac.be ansible_host=192.168.64.68 ansible_user=rocky ansible_ssh_private_key_file=/home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro
+
+[all]
+nc-web-1.cism.ucl.ac.be network_allowed=192.168.64.0/24
+
+[web_servers]
+nc-web-1.cism.ucl.ac.be network_allowed=192.168.64.0/24 nc_global_name=nextcloud.test

+ 9 - 4
dev/provisioning/ansible/nextcloud.yml

@@ -6,8 +6,8 @@
   hosts: web_servers
   vars:
     ansible_python_interpreter: /usr/bin/python3
-    #redis_daemon: "redis"
-    #redis_port: "6379"
+    redis_daemon: "redis"
+    redis_port: "6379"
   pre_tasks:
     - name: define ansible_python_interpreter group // linux distribution
       set_fact:
@@ -29,5 +29,10 @@
     #  vars:
     #    enable_php_fpm: false
     - role: nextcloud
-      #vars:
-        #NEXTCLOUD_VERSION: "24.0.8"
+      vars:
+        ansible_become_pass: ""
+        NEXTCLOUD_VERSION: "24.0.4"
+        objectstore_s3_install: false
+        #objectstore_s3_key: "229ZJOPCR6JHHU4HIP69"
+        #objectstore_s3_secret: "xCnx98XN39fDKh3ACGQ9XuDJEOw7PRBxyFW4KjRs"
+        #objectstore_s3_hostname: "192.168.56.71"

+ 28 - 0
dev/provisioning/ansible/nextcloud_cism.yml

@@ -0,0 +1,28 @@
+---
+- name: apply nextcloud role
+  collections:
+    - community.general
+    - ansible.posix
+  hosts: web_servers
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  pre_tasks:
+    - name: install stuffs to be integrated later in salt
+      dnf:
+        name: ['epel-release', 'policycoreutils-python-utils', 'python3-cryptography', 'libselinux-python3']
+        state: present
+      when: ansible_os_family == 'Rocky'
+    - name: add hostname in hosts
+      blockinfile:
+        dest: /etc/hosts
+        #content: '{{ ansible_host }}	{{ nc_global_name }}'
+        content: '{{ ansible_host }} {{ ansible_fqdn }}'
+        state: present
+  roles:
+    #- role: mariadb
+    #- role: web_php
+    - role: nextcloud
+  environment:
+    http_proxy: "http://proxy.sipr.ucl.ac.be:889"
+    https_proxy: "http://proxy.sipr.ucl.ac.be:889"
+    no_proxy: "127.0.0.1, localhost, 192.168.64.68, 192.168.64.73, 192.168.64.51, nextcloud.test, ceph.cism.ucl.ac.be, 192.168.64.68"

+ 1 - 0
dev/provisioning/ansible/playbook.yml

@@ -2,6 +2,7 @@
 - import_playbook: database.yml
 - import_playbook: sql_loadbalancer.yml
 - import_playbook: storage.yml
+- import_playbook: ceph.yml
 - import_playbook: nextcloud.yml
 - import_playbook: loadbalancer.yml
 

+ 24 - 0
dev/provisioning/ansible/roles/ceph/defaults/main.yml

@@ -0,0 +1,24 @@
+---
+# defaults file for ceph
+ceph_release: quincy
+ceph_mirror: https://download.ceph.com
+ceph_stable_key: https://download.ceph.com/keys/release.asc
+ceph_pkgs:
+  - chrony
+  - cephadm
+  - ceph-common
+ceph_client_pkgs:
+  - chrony
+  - ceph-common
+
+new_host_timezone: Europe/Brussels
+cephadm_url: https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
+cephadm_ssh_user: vagrant
+
+rgw_user: pedro
+rgw_name: PY_Barriat
+rgw_number_of_daemons: 2
+rgw_frontend_port: 7480
+rgw_ssl: false
+rgw_network_allowed: "{{ network_allowed }}"
+rgw_first_bucket: nextcloud

+ 147 - 0
dev/provisioning/ansible/roles/ceph/tasks/main.yml

@@ -0,0 +1,147 @@
+---
+# tasks file
+
+- name: Set main ceph host ip
+  set_fact:
+    ceph_main: "{{ hostvars[groups['ceph_servers'][0]]['ansible_host'] }}"
+
+- name: Check ceph status
+  stat:
+    path: /var/log/ceph/rgw_start.log
+  register: ceph_status
+
+- name: Ending if ceph up and running
+  meta: end_play
+  when: ceph_status.stat.exists
+
+- name: Include OS specific variables.
+  include_vars: "{{ ansible_distribution }}.yml"
+
+- name: Install OS specific setup
+  include_tasks: "setup/{{ ansible_distribution }}.yml"
+
+- name: Set the timezone to the {{ new_host_timezone }} one
+  timezone: "name={{ new_host_timezone }}"
+
+- name: Make sure ntp is started, and is enabled on restart.
+  service:
+    name: chrony
+    state: started
+    enabled: yes
+    masked: no
+
+- name: Bootstrap a new cluster
+  shell: cephadm bootstrap --mon-ip {{ ansible_host }} --ssh-user {{ cephadm_ssh_user }} >> bootstrap.log
+  args:
+    chdir: /var/log/ceph
+    creates: bootstrap.log
+  run_once: true
+  delegate_to: "{{ groups['ceph_servers'][0] }}"
+
+- name: get the cephadm ssh pub key
+  command: "ceph cephadm get-pub-key"
+  changed_when: false
+  run_once: true
+  register: cephadm_get_pub_key
+  delegate_to: "{{ groups['ceph_servers'][0] }}"
+
+- name: allow ssh public key for {{ cephadm_ssh_user | default('root') }} account
+  authorized_key:
+    user: "{{ cephadm_ssh_user | default('root') }}"
+    key: "{{ cephadm_get_pub_key.stdout }}"
+
+- name: Restart chronyd
+  service:
+    name: chronyd
+    state: restarted
+
+- name: Pause to build ceph config based on cephadm bootstrap
+  pause:
+    minutes: 5
+
+      #- name: Ending if ceph status is HEALTH_OK
+      #  block:
+      #    - name: Register ceph status
+      #      shell: ceph status -f json-pretty
+      #      register: ceph_status
+      #      no_log: true
+      #
+      #    - name: Check ceph status
+      #      meta: end_play
+      #      when: (ceph_status.stdout | from_json).health.status == "HEALTH_OK"
+      #  run_once: true
+      #  delegate_to: "{{ groups['ceph_servers'][0] }}"
+
+- name: Config the new cluster
+  block:
+    - name: set cephadm ssh user to {{ cephadm_ssh_user }}
+      command: "ceph cephadm set-user {{ cephadm_ssh_user | default('root') }}"
+      changed_when: false
+    
+    - name: add all ceph hosts
+      shell: ceph orch host add {{ hostvars[groups['ceph_servers'][host_idx]]['inventory_hostname'] }} {{ hostvars[groups['ceph_servers'][host_idx]]['ansible_host'] }}
+      when: host_idx != 0
+      loop: "{{ groups['ceph_servers'] }}"
+      loop_control:
+        index_var: host_idx
+        pause: 2
+    
+    - name: apply osd all available devices
+      shell: ceph orch apply osd --all-available-devices
+    
+        #- name: Copy radosgw.yml file
+        #  template:
+        #    src: radosgw.yml.j2
+        #    dest: /etc/ceph/radosgw.yml
+        #    owner: root
+        #    group: root
+        #    mode: 0644
+
+        #- name: Start a S3 Ceph Rados Gateway
+        #  shell: ceph orch apply -i /etc/ceph/radosgw.yml  >> rgw_start.log
+        #  args:
+        #    chdir: /var/log/ceph
+        #    creates: rgw_start.log
+
+    - name: Start a S3 Ceph Rados Gateway
+      shell: ceph orch apply rgw s3
+
+    - name: Pause to build ceph config based on S3 Rados Gateway
+      pause:
+        minutes: 5
+    
+    - name: set rgw-credentials for dashboard
+      shell: ceph dashboard set-rgw-credentials
+    
+    - name: create a radosgw user in order to access the object gateway service
+      shell: radosgw-admin user create --uid={{ rgw_user }} --display-name={{ rgw_name }} --system
+      register: user_rgw_json
+    
+    - name: check user rgw keys
+      set_fact:
+        access_key: "{{ (user_rgw_json.stdout | from_json) | json_query('keys[0].access_key') }}"
+        secret_key: "{{ (user_rgw_json.stdout | from_json) | json_query('keys[0].secret_key') }}"
+   
+    - name: set user access_key in tmp
+      shell: echo {{ access_key|string }} > /tmp/access_key
+    
+    - name: set user secret_key in tmp
+      shell: echo {{ secret_key|string }} > /tmp/secret_key
+    
+    - name: set user access_key for dashboard
+      shell: ceph dashboard set-rgw-api-access-key -i /tmp/access_key
+    
+    - name: set user secret_key for dashboard
+      shell: ceph dashboard set-rgw-api-secret-key -i /tmp/secret_key
+
+    - name: create a first bucket
+      amazon.aws.s3_bucket:
+        name: "{{ rgw_first_bucket }}"
+        endpoint_url: "http://{{ ceph_main }}:{{ rgw_frontend_port }}"
+        access_key: "{{ access_key|string }}"
+        secret_key: "{{ secret_key|string }}"
+        ceph: true
+        validate_certs: false
+      ignore_errors: yes
+  run_once: true
+  delegate_to: "{{ groups['ceph_servers'][0] }}"

+ 27 - 0
dev/provisioning/ansible/roles/ceph/tasks/setup/RedHat.yml

@@ -0,0 +1,27 @@
+---
+# Install mariadb
+- name: Add MariaDB Repository for {{ ansible_distribution }}
+  template:
+    src: mariadb-server.repo.j2
+    dest: /etc/yum.repos.d/mariadb-server.repo
+
+- name: Install all the {{ ansible_distribution }} mariadb packages
+  dnf:
+    name: "{{ mariadb_packages }}"
+    state: present
+
+- name: Install rsync in case of Galera
+  dnf: 
+    name: rsync
+    state: present
+  when: groups['db_servers'] | length > 1
+
+- name: Set galera_wsrep_provider variable
+  set_fact:
+    galera_wsrep_provider: "/usr/lib64/galera-4/libgalera_smm.so"
+
+- name: Mariadb service
+  service:
+    name: "{{ mariadb_service }}"
+    state: started
+    enabled: yes

+ 23 - 0
dev/provisioning/ansible/roles/ceph/tasks/setup/Rocky.yml

@@ -0,0 +1,23 @@
+---
+# Install mariadb
+- name: Add MariaDB Repository for {{ ansible_distribution }}
+  template:
+    src: mariadb-server.repo.j2
+    dest: /etc/yum.repos.d/mariadb-server.repo
+
+- name: Install all the {{ ansible_distribution }} mariadb packages
+  dnf:
+    name: "{{ mariadb_packages }}"
+    state: present
+
+- name: Install rsync in case of Galera
+  dnf: 
+    name: rsync
+    state: present
+  when: (nb_db_servers | int) > 1
+
+- name: Mariadb service
+  service:
+    name: "{{ mariadb_service }}"
+    state: started
+    enabled: yes

+ 13 - 0
dev/provisioning/ansible/roles/ceph/tasks/setup/Suse.yml

@@ -0,0 +1,13 @@
+---
+# Install mariadb
+
+- name: Install all the Suse mariadb packages
+  zypper:
+    name: "{{ mariadb_packages }}"
+    state: present
+
+- name: Mariadb service
+  service:
+    name: "{{ mariadb_service }}"
+    state: started
+    enabled: yes

+ 69 - 0
dev/provisioning/ansible/roles/ceph/tasks/setup/Ubuntu.yml

@@ -0,0 +1,69 @@
+---
+- name: enable repo from download.ceph.com
+  block:
+    - name: prevent ceph certificate error
+      apt:
+        name: ca-certificates
+        state: latest
+        update_cache: yes
+      register: result
+      until: result is succeeded
+
+    - name: configure ceph community repository stable key
+      apt_key:
+        url: "{{ ceph_stable_key }}"
+        state: present
+
+    - name: configure Ceph community repository
+      apt_repository:
+        repo: "deb {{ ceph_mirror }}/debian-{{ ceph_release }}/ {{ ansible_facts['distribution_release'] }} main"
+        state: present
+        filename: ceph
+        update_cache: no
+
+- name: install prerequisites packages
+  apt:
+    name: "{{ dist_packages }}"
+    state: latest
+    update_cache: yes
+  register: result
+  until: result is succeeded
+
+- name: install ceph packages
+  apt:
+    name: "{{ ceph_pkgs }}"
+    state: present
+    update_cache: yes 
+  register: result
+  until: result is succeeded
+
+- name: install container engine
+  block:
+    - name: install docker
+      block:
+        - name: uninstall old version packages
+          apt:
+            name: "{{ item }}"
+            state: absent
+          loop: "{{ docker_old_packages }}"
+
+        - name: configure docker repository key
+          apt_key:
+            url: "{{ docker_key }}"
+            state: present
+
+        - name: setup docker repository
+          apt_repository:
+            repo: "{{ docker_repo }}"
+            state: present
+            filename: docker
+            update_cache: no
+
+        - name: install docker
+          apt:
+            name: "{{ item }}"
+            state: present
+            update_cache: yes
+          register: result
+          until: result is succeeded
+          loop: "{{ docker_packages }}"

+ 15 - 0
dev/provisioning/ansible/roles/ceph/templates/radosgw.yml.j2

@@ -0,0 +1,15 @@
+service_type: rgw
+service_id: default
+placement:
+  hosts:
+{% for item in groups['ceph_servers'] | list %}
+  - {{ item }}
+{% endfor %}
+  count_per_host: {{ rgw_number_of_daemons }}
+networks:
+  - {{ rgw_network_allowed }}
+spec:
+  rgw_realm: default
+  rgw_zone: default
+  rgw_frontend_port: {{ rgw_frontend_port }}
+  ssl: {{ rgw_ssl }}

+ 23 - 0
dev/provisioning/ansible/roles/ceph/vars/Ubuntu.yml

@@ -0,0 +1,23 @@
+---
+dist_packages:
+  - ca-certificates
+  - python3
+  - chrony
+  - python3-botocore
+  - python3-boto3
+
+docker_old_packages:
+  - docker
+  - docker-engine
+  - docker.io
+  - containerd
+  - runc
+
+docker_key: https://download.docker.com/linux/ubuntu/gpg
+
+docker_repo: "deb https://download.docker.com/linux/ubuntu {{ ansible_facts['distribution_release'] }} stable"
+
+docker_packages:
+  - docker-ce
+  - docker-ce-cli
+  - containerd.io

+ 2 - 2
dev/provisioning/ansible/roles/haproxy/tasks/main.yml

@@ -10,10 +10,10 @@
         haproxy_backend_servers: "{{ groups['test_servers'] | list }}"
 
 - name: Include OS specific variables.
-  include_vars: "{{ ansible_os_family }}.yml"
+  include_vars: "{{ ansible_distribution }}.yml"
 
 - name: Install packages
-  include_tasks: "setup/{{ ansible_os_family }}.yml"
+  include_tasks: "setup/{{ ansible_distribution }}.yml"
 
 - name: Get HAProxy version.
   command: haproxy -v

+ 2 - 2
dev/provisioning/ansible/roles/keepalived/tasks/main.yml

@@ -1,10 +1,10 @@
 ---
 
 - name: Include OS specific variables
-  include_vars: "{{ ansible_os_family }}.yml"
+  include_vars: "{{ ansible_distribution }}.yml"
 
 - name: Install packages
-  include_tasks: "setup/{{ ansible_os_family }}.yml"
+  include_tasks: "setup/{{ ansible_distribution }}.yml"
 
 - name: Get IP range.
   shell: "echo {{ network_allowed }} | cut -d'.' --fields=1,2,3"

+ 3 - 0
dev/provisioning/ansible/roles/mariadb/defaults/main.yml

@@ -19,6 +19,9 @@ disable_remote_root_login: true
 mariadb_bind_address: '0.0.0.0'
 mariadb_port: 3306
 
+mariadb_datadir: /var/lib/mysql
+mariadb_socket: /var/lib/mysql/mysql.sock
+
 # Add mariabd databases
 # default create nothing
 mariadb_database: 

+ 12 - 9
dev/provisioning/ansible/roles/mariadb/tasks/config/secure-installation.yml

@@ -18,17 +18,15 @@
 #  when: mysql_user_name != mysql_root_username
 
 - name: Disallow root login remotely
-  command: 'mysql -NBe "{{ item }}"'
+  command: 'mysql -NBe "{{ item }}" -S {{ mariadb_socket }}'
   with_items:
     - DELETE FROM mysql.user WHERE User='{{ mysql_root_username }}' AND Host NOT IN ('localhost', '127.0.0.1', '::1')
   changed_when: false
 
 - name: Get list of hosts for the root user.
-  command: mysql -NBe
-    "SELECT Host
-    FROM mysql.user
-    WHERE User = '{{ mysql_root_username }}'
-    ORDER BY (Host='localhost') ASC"
+  command: 'mysql -NBe "{{ item }}" -S {{ mariadb_socket }}'
+  with_items:
+    - SELECT Host FROM mysql.user WHERE User='{{ mysql_root_username }}' ORDER BY (Host='localhost') ASC
   register: mysql_root_hosts
   changed_when: false
   check_mode: false
@@ -50,7 +48,9 @@
     mode: 0600
 
 - name: Get list of hosts for the anonymous user.
-  command: mysql -NBe "SELECT Host FROM mysql.user WHERE User = ''"
+  command: 'mysql -NBe "{{ item }}" -S {{ mariadb_socket }}'
+  with_items:
+    - SELECT Host FROM mysql.user WHERE User = ''
   register: mysql_anonymous_hosts
   changed_when: false
   check_mode: false
@@ -65,10 +65,13 @@
 - name: Remove mysql users.
   mysql_user:
     name: "{{ item }}"
-    host: "localhost"
+    login_unix_socket: "{{ mariadb_socket }}"
     state: absent
   with_items:
     - mysql
 
 - name: Remove MySQL test database.
-  mysql_db: "name='test' state=absent"
+  mysql_db: 
+    name: test
+    state: absent
+    login_unix_socket: "{{ mariadb_socket }}"

+ 1 - 1
dev/provisioning/ansible/roles/mariadb/tasks/config/secure.yml

@@ -7,7 +7,7 @@
   ignore_errors: true
 
 - name: Disallow root login remotely
-  command: 'mysql -NBe "{{ item }}" -p"{{ mysql_root_password }}"'
+  command: 'mysql -NBe "{{ item }}" -p"{{ mysql_root_password }}" -S {{ mariadb_socket }}'
   with_items:
     - DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')
   changed_when: false

+ 13 - 0
dev/provisioning/ansible/roles/mariadb/tasks/config/template.yml

@@ -21,3 +21,16 @@
     group: "{{ mariadb_config_file_group }}"
     mode: 0644
   when: (nb_db_servers | int) > 1
+
+- name: Synchronize mariadb_datadir if custom path
+  synchronize:
+    src: /var/lib/mysql/
+    dest: "{{ mariadb_datadir }}"
+  delegate_to: "{{ inventory_hostname }}"
+  when: mariadb_datadir != "/var/lib/mysql"
+
+- name: Mariadb service
+  service:
+    name: "{{ mariadb_service }}"
+    state: started
+    enabled: yes 

+ 2 - 2
dev/provisioning/ansible/roles/mariadb/tasks/main.yml

@@ -16,7 +16,7 @@
   when: "'db_servers' in groups.keys()"
 
 - name: Include OS specific variables.
-  include_vars: "{{ ansible_os_family }}.yml"
+  include_vars: "{{ ansible_distribution }}.yml"
 
 - name: collect facts about system services
   service_facts:
@@ -37,7 +37,7 @@
       #  when: my_service.state != "unknown" and my_service.status != "disabled"
 
 - name: Install Mariadb
-  include_tasks: "setup/{{ ansible_os_family }}.yml"
+  include_tasks: "setup/{{ ansible_distribution }}.yml"
   when: my_service.status == "disabled"
 
 - name: Ensure Mariadb configfile is present

+ 0 - 6
dev/provisioning/ansible/roles/mariadb/tasks/setup/RedHat.yml

@@ -19,9 +19,3 @@
 - name: Set galera_wsrep_provider variable
   set_fact:
     galera_wsrep_provider: "/usr/lib64/galera-4/libgalera_smm.so"
-
-- name: Mariadb service
-  service:
-    name: "{{ mariadb_service }}"
-    state: started
-    enabled: yes

+ 0 - 6
dev/provisioning/ansible/roles/mariadb/tasks/setup/Rocky.yml

@@ -15,9 +15,3 @@
     name: rsync
     state: present
   when: (nb_db_servers | int) > 1
-
-- name: Mariadb service
-  service:
-    name: "{{ mariadb_service }}"
-    state: started
-    enabled: yes

+ 0 - 6
dev/provisioning/ansible/roles/mariadb/tasks/setup/Suse.yml

@@ -5,9 +5,3 @@
   zypper:
     name: "{{ mariadb_packages }}"
     state: present
-
-- name: Mariadb service
-  service:
-    name: "{{ mariadb_service }}"
-    state: started
-    enabled: yes

+ 2 - 0
dev/provisioning/ansible/roles/mariadb/templates/server.j2

@@ -11,6 +11,8 @@
 [mysqld]
 bind-address = {{ mariadb_bind_address }}
 port = {{ mariadb_port }}
+datadir = {{ mariadb_datadir }}
+socket = {{ mariadb_socket }}
 
 {% if mariadb_slow_query_log_enabled == true %}
 slow_query_log = 1

+ 0 - 1
dev/provisioning/ansible/roles/mariadb/vars/RedHat.yml

@@ -8,7 +8,6 @@ mariadb_packages:
 mariadb_service: mariadb
 mariadb_config_file: /etc/my.cnf.d/server.cnf
 galera_config_file: /etc/my.cnf.d/galera.cnf
-mariadb_socket: /var/lib/mysql/mysql.sock
 mariadb_config_file_owner: root
 mariadb_config_file_group: root
 

+ 0 - 1
dev/provisioning/ansible/roles/mariadb/vars/Rocky.yml

@@ -8,7 +8,6 @@ mariadb_packages:
 mariadb_service: mariadb
 mariadb_config_file: /etc/my.cnf.d/server.cnf
 galera_config_file: /etc/my.cnf.d/galera.cnf
-mariadb_socket: /var/lib/mysql/mysql.sock
 mariadb_config_file_owner: root
 mariadb_config_file_group: root
 

+ 36 - 25
dev/provisioning/ansible/roles/nextcloud/defaults/main.yml

@@ -1,18 +1,21 @@
 ---
-# [NEXTCLOUD CONFIG]
-nc_global_name: "nc.test"
-nextcloud_ipv6: false
-
-# defaults file for nextcloud
-NEXTCLOUD_VERSION: "25.0.2"
+# [INSTALL_CONFIG]
+NEXTCLOUD_VERSION: "25.0.4"
 NEXTCLOUD_TARBALL: "nextcloud-{{ NEXTCLOUD_VERSION }}.tar.bz2"
 NEXTCLOUD_URL: "https://download.nextcloud.com/server/releases/{{ NEXTCLOUD_TARBALL }}"
 NEXTCLOUD_GPG: "https://nextcloud.com/nextcloud.asc"
 GPG_FINGERPRINT: "28806A878AE423A28372792ED75899B9A724937A"
 
+# [DATABASE_CONFIG]
+db_host: "127.0.0.1" # overload from the role
+db_port: "3306"
+nc_db_name: "nextcloudb"
+nc_db_user: "nextcloudb" # overload from the role
+nc_db_password: "secret" # overload from the role
+
 # [REDIS CONFIG]
 redis_daemon: "none"
-redis_host: "127.0.0.1"    # overload from the role
+redis_host: "127.0.0.1"
 redis_port: "6379"
 
 # [WEB CONFIG]
@@ -20,15 +23,13 @@ ssl_path: "/etc/{{ ansible_fqdn }}/ssl"
 nc_data_dir: "/srv/data"
 nc_admin_name: "pedro"
 nc_admin_pwd: "pedro"
-#
-nfs_server: "127.0.0.1"
-nfs_data_path: "/nextcloud/data"
-nfs_web_path: "/nextcloud/web"
 
-nc_loglevel: 2
-nc_log_rotate_size: 10485760
-nc_background_cron: true
-nc_cron_period: 10 # every <nc_cron_period> min
+# [NEXTCLOUD CONFIG]
+nc_global_name: "nc.test"
+nextcloud_ipv6: false
+# If our internet requires proxy to work
+nc_proxy: false
+nc_proxy_url: "proxy.sipr.ucl.ac.be:889"
 
 ## Custom nextcloud settings
 ## https://docs.nextcloud.com/server/12/admin_manual/configuration_server/config_sample_php_parameters.html
@@ -41,20 +42,20 @@ nextcloud_config_settings:
   - { name: 'mail_domain', value: 'uclouvain.be' }
   - { name: 'mail_smtphost', value: 'smtp.sgsi.ucl.ac.be' }
   - { name: 'mail_smtpauthtype', value: 'LOGIN' }
-    #- { name: 'overwrite.cli.url', value: 'https://{{ nc_global_name }}' }
-    #- { name: 'overwritehost', value: '{{ nc_global_name }}' }
   - { name: 'overwriteprotocol', value: 'https' }
 
-#php /var/www/html/occ config:system:set share_folder --value="/Shared"
+# [LOG]
+nc_loglevel: 2
+nc_log_rotate_size: 10485760
+nc_background_cron: true
+nc_cron_period: 10 # every <nc_cron_period> min
 
-# [DATABASE]
-db_host: "127.0.0.1" # overload from the role
-db_port: "3306"
-nc_db_name: "nextcloudb"
-nc_db_user: "nextcloudb" # overload from the role
-nc_db_password: "secret" # overload from the role
+# [BACKUP]
+nfs_server: "127.0.0.1"
+nfs_path: "/backup/nextcloud"
 
 # [APPS]
+nc_collabora: false
 nextcloud_apps:
   - twofactor_totp
   - deck
@@ -63,4 +64,14 @@ nextcloud_apps:
   - contacts
   #- apporder
 
-nc_collabora: false
+# [OBJECTSTORE]
+objectstore_s3_install: false
+objectstore_s3_bucket_name: "nextcloud"
+objectstore_s3_key: "9TEYR63U7US6LID709C4"
+objectstore_s3_secret: "72mwowlA1pW81n6hw8SyJD8vxNDSZfwHaOnQ4hhR"
+objectstore_s3_hostname: "192.168.64.51"
+objectstore_s3_port: "7480"
+objectstore_s3_use_ssl: false
+objectstore_s3_region: "be"
+objectstore_s3_use_path_style: true
+

+ 243 - 0
dev/provisioning/ansible/roles/nextcloud/files/NextcloudBackup.sh

@@ -0,0 +1,243 @@
+#!/bin/bash
+
+#
+# Bash script for creating backups of Nextcloud.
+#
+# Version 3.0.3
+#
+# Requirements:
+#	- pigz (https://zlib.net/pigz/) for using backup compression. If not available, you can use another compression algorithm (e.g. gzip)
+#
+# Supported database systems:
+# 	- MySQL/MariaDB
+# 	- PostgreSQL
+#
+# Usage:
+# 	- With backup directory specified in the script:  ./NextcloudBackup.sh
+# 	- With backup directory specified by parameter: ./NextcloudBackup.sh <backupDirectory> (e.g. ./NextcloudBackup.sh /media/hdd/nextcloud_backup)
+#
+# The script is based on an installation of Nextcloud using nginx and MariaDB, see https://decatec.de/home-server/nextcloud-auf-ubuntu-server-20-04-lts-mit-nginx-mariadb-php-lets-encrypt-redis-und-fail2ban/
+#
+
+
+# Make sure the script exits when any command fails
+set -Eeuo pipefail
+
+# Variables
+working_dir=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
+configFile="${working_dir}/NextcloudBackupRestore.conf"   # Holds the configuration for NextcloudBackup.sh and NextcloudRestore.sh
+_backupMainDir=${1:-}
+
+# Function for error messages
+errorecho() { cat <<< "$@" 1>&2; }
+
+#
+# Check if config file exists
+#
+if [ ! -f "${configFile}" ]
+then
+	errorecho "ERROR: Configuration file $configFile cannot be found!"
+	errorecho "Please make sure that a configuration file '$configFile' is present in the main directory of the scripts."
+	errorecho "This file can be created automatically using the setup.sh script."
+	exit 1
+fi
+
+source "$configFile" || exit 1  # Read configuration variables
+
+if [ -n "$_backupMainDir" ]; then
+	backupMainDir=$(echo $_backupMainDir | sed 's:/*$::')
+fi
+
+currentDate=$(date +"%Y%m%d_%H%M%S")
+
+# The actual directory of the current backup - this is a subdirectory of the main directory above with a timestamp
+backupDir="${backupMainDir}/${currentDate}"
+
+function DisableMaintenanceMode() {
+	echo "$(date +"%H:%M:%S"): Switching off maintenance mode..."
+	sudo -u "${webserverUser}" php ${nextcloudFileDir}/occ maintenance:mode --off
+	echo "Done"
+	echo
+}
+
+# Capture CTRL+C
+trap CtrlC INT
+
+function CtrlC() {
+	read -p "Backup cancelled. Keep maintenance mode? [y/n] " -n 1 -r
+	echo
+
+	if ! [[ $REPLY =~ ^[Yy]$ ]]
+	then
+		DisableMaintenanceMode
+	else
+		echo "Maintenance mode still enabled."
+	fi
+
+	echo "Starting web server..."
+	systemctl start "${webserverServiceName}"
+	echo "Done"
+	echo
+
+	exit 1
+}
+
+#
+# Print information
+#
+echo "Backup directory: ${backupMainDir}"
+
+#
+# Check for root
+#
+if [ "$(id -u)" != "0" ]
+then
+	errorecho "ERROR: This script has to be run as root!"
+	exit 1
+fi
+
+#
+# Check if backup dir already exists
+#
+if [ ! -d "${backupDir}" ]
+then
+	mkdir -p "${backupDir}"
+else
+	errorecho "ERROR: The backup directory ${backupDir} already exists!"
+	exit 1
+fi
+
+#
+# Set maintenance mode
+#
+echo "$(date +"%H:%M:%S"): Set maintenance mode for Nextcloud..."
+sudo -u "${webserverUser}" php ${nextcloudFileDir}/occ maintenance:mode --on
+echo "Done"
+echo
+
+#
+# Stop web server
+#
+echo "$(date +"%H:%M:%S"): Stopping web server..."
+systemctl stop "${webserverServiceName}"
+echo "Done"
+echo
+
+#
+# Backup file directory
+#
+echo "$(date +"%H:%M:%S"): Creating backup of Nextcloud file directory..."
+
+if [ "$useCompression" = true ] ; then
+	`$compressionCommand "${backupDir}/${fileNameBackupFileDir}" -C "${nextcloudFileDir}" .`
+else
+	tar -cpf "${backupDir}/${fileNameBackupFileDir}" -C "${nextcloudFileDir}" .
+fi
+
+echo "Done"
+echo
+
+#
+# Backup data directory
+#
+echo "$(date +"%H:%M:%S"): Creating backup of Nextcloud data directory..."
+
+if [ "$includeUpdaterBackups" = false ] ; then
+	echo "Ignoring Nextcloud updater backup directory"
+
+	if [ "$useCompression" = true ] ; then
+		`$compressionCommand "${backupDir}/${fileNameBackupDataDir}"  --exclude="updater-*/backups/*" -C "${nextcloudDataDir}" .`
+	else
+		tar -cpf "${backupDir}/${fileNameBackupDataDir}"  --exclude="updater-*/backups/*" -C "${nextcloudDataDir}" .
+	fi
+else
+	if [ "$useCompression" = true ] ; then
+		`$compressionCommand "${backupDir}/${fileNameBackupDataDir}"  -C "${nextcloudDataDir}" .`
+	else
+		tar -cpf "${backupDir}/${fileNameBackupDataDir}"  -C "${nextcloudDataDir}" .
+	fi
+fi
+
+echo "Done"
+echo
+
+#
+# Backup local external storage.
+#
+if [ ! -z "${nextcloudLocalExternalDataDir+x}" ] ; then
+	echo "$(date +"%H:%M:%S"): Creating backup of Nextcloud local external storage directory..."
+
+	if [ "$useCompression" = true ] ; then
+		`$compressionCommand "${backupDir}/${fileNameBackupExternalDataDir}"  -C "${nextcloudLocalExternalDataDir}" .`
+	else
+		tar -cpf "${backupDir}/${fileNameBackupExternalDataDir}"  -C "${nextcloudLocalExternalDataDir}" .
+	fi
+
+	echo "Done"
+	echo
+fi
+
+#
+# Backup DB
+#
+if [ "${databaseSystem,,}" = "mysql" ] || [ "${databaseSystem,,}" = "mariadb" ]; then
+  	echo "$(date +"%H:%M:%S"): Backup Nextcloud database (MySQL/MariaDB)..."
+
+	if ! [ -x "$(command -v mysqldump)" ]; then
+		errorecho "ERROR: MySQL/MariaDB not installed (command mysqldump not found)."
+		errorecho "ERROR: No backup of database possible!"
+	else
+		mysqldump --single-transaction -h localhost -u "${dbUser}" -p"${dbPassword}" "${nextcloudDatabase}" > "${backupDir}/${fileNameBackupDb}"
+	fi
+
+	echo "Done"
+	echo
+elif [ "${databaseSystem,,}" = "postgresql" ] || [ "${databaseSystem,,}" = "pgsql" ]; then
+	echo "$(date +"%H:%M:%S"): Backup Nextcloud database (PostgreSQL)..."
+
+	if ! [ -x "$(command -v pg_dump)" ]; then
+		errorecho "ERROR: PostgreSQL not installed (command pg_dump not found)."
+		errorecho "ERROR: No backup of database possible!"
+	else
+		PGPASSWORD="${dbPassword}" pg_dump "${nextcloudDatabase}" -h localhost -U "${dbUser}" -f "${backupDir}/${fileNameBackupDb}"
+	fi
+
+	echo "Done"
+	echo
+fi
+
+#
+# Start web server
+#
+echo "$(date +"%H:%M:%S"): Starting web server..."
+systemctl start "${webserverServiceName}"
+echo "Done"
+echo
+
+#
+# Disable maintenance mode
+#
+DisableMaintenanceMode
+
+#
+# Delete old backups
+#
+if [ ${maxNrOfBackups} != 0 ]
+then
+	nrOfBackups=$(ls -l ${backupMainDir} | grep -c ^d)
+
+	if [ ${nrOfBackups} -gt ${maxNrOfBackups} ]
+	then
+		echo "$(date +"%H:%M:%S"): Removing old backups..."
+		ls -t ${backupMainDir} | tail -$(( nrOfBackups - maxNrOfBackups )) | while read -r dirToRemove; do
+			echo "${dirToRemove}"
+			rm -r "${backupMainDir}/${dirToRemove:?}"
+			echo "Done"
+			echo
+		done
+	fi
+fi
+
+echo
+echo "DONE!"
+echo "$(date +"%H:%M:%S"): Backup created: ${backupDir}"

+ 269 - 0
dev/provisioning/ansible/roles/nextcloud/files/NextcloudRestore.sh

@@ -0,0 +1,269 @@
+#!/bin/bash
+
+#
+# Bash script for restoring backups of Nextcloud.
+#
+# Version 3.0.3
+#
+# Requirements:
+#	- pigz (https://zlib.net/pigz/) for using backup compression. If not available, you can use another compression algorithm (e.g. gzip)
+#
+# Supported database systems:
+# 	- MySQL/MariaDB
+# 	- PostgreSQL
+#
+# Usage:
+#   - With backup directory specified in the script: ./NextcloudRestore.sh <BackupName> (e.g. ./NextcloudRestore.sh 20170910_132703)
+#   - With backup directory specified by parameter: ./NextcloudRestore.sh <BackupName> <BackupDirectory> (e.g. ./NextcloudRestore.sh 20170910_132703 /media/hdd/nextcloud_backup)
+#
+# The script is based on an installation of Nextcloud using nginx and MariaDB, see https://decatec.de/home-server/nextcloud-auf-ubuntu-server-20-04-lts-mit-nginx-mariadb-php-lets-encrypt-redis-und-fail2ban/
+#
+
+
+# Make sure the script exits when any command fails
+set -Eeuo pipefail
+
+# Variables
+working_dir=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
+configFile="${working_dir}/NextcloudBackupRestore.conf"   # Holds the configuration for NextcloudBackup.sh and NextcloudRestore.sh
+restore=${1:-}
+_backupMainDir=${2:-}
+
+# Function for error messages
+errorecho() { cat <<< "$@" 1>&2; }
+
+#
+# Check if config file exists
+#
+if [ ! -f "${configFile}" ]
+then
+	errorecho "ERROR: Configuration file $configFile cannot be found!"
+    errorecho "Please make sure that a configuration file '$configFile' is present in the main directory of the scripts."
+    errorecho "This file can be created automatically using the setup.sh script."
+    exit 1
+fi
+
+source "$configFile" || exit 1  # Read configuration variables
+
+if [ -n "$_backupMainDir" ]; then
+	backupMainDir="$_backupMainDir"
+fi
+
+echo "Backup directory: $backupMainDir"
+
+currentRestoreDir="${backupMainDir}/${restore}"
+
+#
+# Check if parameter(s) given
+#
+if [ $# != "1" ] && [ $# != "2" ]
+then
+    errorecho "ERROR: No backup name to restore given, or wrong number of parameters!"
+    errorecho "Usage: NextcloudRestore.sh 'BackupDate' ['BackupDirectory']"
+    exit 1
+fi
+
+#
+# Check for root
+#
+if [ "$(id -u)" != "0" ]
+then
+    errorecho "ERROR: This script has to be run as root!"
+    exit 1
+fi
+
+#
+# Check if backup dir exists
+#
+if [ ! -d "${currentRestoreDir}" ]
+then
+	errorecho "ERROR: Backup ${restore} not found!"
+    exit 1
+fi
+
+#
+# Check if the commands for restoring the database are available
+#
+if [ "${databaseSystem,,}" = "mysql" ] || [ "${databaseSystem,,}" = "mariadb" ]; then
+    if ! [ -x "$(command -v mysql)" ]; then
+		errorecho "ERROR: MySQL/MariaDB not installed (command mysql not found)."
+		errorecho "ERROR: No restore of database possible!"
+        errorecho "Cancel restore"
+        exit 1
+    fi
+elif [ "${databaseSystem,,}" = "postgresql" ] || [ "${databaseSystem,,}" = "pgsql" ]; then
+    if ! [ -x "$(command -v psql)" ]; then
+		errorecho "ERROR: PostgreSQL not installed (command psql not found)."
+		errorecho "ERROR: No restore of database possible!"
+        errorecho "Cancel restore"
+        exit 1
+	fi
+fi
+
+#
+# Set maintenance mode
+#
+echo "$(date +"%H:%M:%S"): Set maintenance mode for Nextcloud..."
+sudo -u "${webserverUser}" php ${nextcloudFileDir}/occ maintenance:mode --on
+echo "Done"
+echo
+
+#
+# Stop web server
+#
+echo "$(date +"%H:%M:%S"): Stopping web server..."
+systemctl stop "${webserverServiceName}"
+echo "Done"
+echo
+
+#
+# Delete old Nextcloud directories
+#
+
+# File directory
+echo "$(date +"%H:%M:%S"): Deleting old Nextcloud file directory..."
+rm -rf "${nextcloudFileDir}"
+mkdir -p "${nextcloudFileDir}"
+echo "Done"
+echo
+
+# Data directory
+echo "$(date +"%H:%M:%S"): Deleting old Nextcloud data directory..."
+rm -rf "${nextcloudDataDir}/*"
+
+echo "Done"
+echo
+
+# Local external storage
+if [ ! -z "${nextcloudLocalExternalDataDir+x}" ] ; then
+    echo "Deleting old Nextcloud local external storage directory..."
+    rm -rf "${nextcloudLocalExternalDataDir}/*"
+    echo "Done"
+    echo
+fi
+
+#
+# Restore file and data directory
+#
+
+# File directory
+echo "$(date +"%H:%M:%S"): Restoring Nextcloud file directory..."
+
+if [ "$useCompression" = true ] ; then
+    `$extractCommand "${currentRestoreDir}/${fileNameBackupFileDir}" -C "${nextcloudFileDir}"`
+else
+    tar -xmpf "${currentRestoreDir}/${fileNameBackupFileDir}" -C "${nextcloudFileDir}"
+fi
+
+echo "Done"
+echo
+
+# Data directory
+echo "$(date +"%H:%M:%S"): Restoring Nextcloud data directory..."
+
+if [ "$useCompression" = true ] ; then
+    `$extractCommand "${currentRestoreDir}/${fileNameBackupDataDir}" -C "${nextcloudDataDir}"`
+else
+    tar -xmpf "${currentRestoreDir}/${fileNameBackupDataDir}" -C "${nextcloudDataDir}"
+fi
+
+echo "Done"
+echo
+
+# Local external storage
+if [ ! -z "${nextcloudLocalExternalDataDir+x}" ] ; then
+    echo "$(date +"%H:%M:%S"): Restoring Nextcloud local external storage directory..."
+
+    if [ "$useCompression" = true ] ; then
+        `$extractCommand "${currentRestoreDir}/${fileNameBackupExternalDataDir}" -C "${nextcloudLocalExternalDataDir}"`
+    else
+        tar -xmpf "${currentRestoreDir}/${fileNameBackupExternalDataDir}" -C "${nextcloudLocalExternalDataDir}"
+    fi
+
+    echo "Done"
+    echo
+fi
+
+#
+# Restore database
+#
+echo "$(date +"%H:%M:%S"): Dropping old Nextcloud DB..."
+
+if [ "${databaseSystem,,}" = "mysql" ] || [ "${databaseSystem,,}" = "mariadb" ]; then
+    mysql -h localhost -u "${dbUser}" -p"${dbPassword}" -e "DROP DATABASE ${nextcloudDatabase}"
+elif [ "${databaseSystem,,}" = "postgresql" ]; then
+	sudo -u postgres psql -c "DROP DATABASE ${nextcloudDatabase};"
+fi
+
+echo "Done"
+echo
+
+echo "$(date +"%H:%M:%S"): Creating new DB for Nextcloud..."
+
+if [ "${databaseSystem,,}" = "mysql" ] || [ "${databaseSystem,,}" = "mariadb" ]; then
+    if [ ! -z "${dbNoMultibyte+x}" ] && [ "${dbNoMultibyte}" = true ] ; then
+        # Database from the backup DOES NOT use UTF8 with multibyte support (e.g. for emojis in filenames)
+        mysql -h localhost -u "${dbUser}" -p"${dbPassword}" -e "CREATE DATABASE ${nextcloudDatabase}"
+    else
+        # Database from the backup uses UTF8 with multibyte support (e.g. for emojis in filenames)
+        mysql -h localhost -u "${dbUser}" -p"${dbPassword}" -e "CREATE DATABASE ${nextcloudDatabase} CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci"
+    fi
+elif [ "${databaseSystem,,}" = "postgresql" ] || [ "${databaseSystem,,}" = "pgsql" ]; then
+    sudo -u postgres psql -c "CREATE DATABASE ${nextcloudDatabase} WITH OWNER ${dbUser} TEMPLATE template0 ENCODING \"UNICODE\";"
+fi
+
+echo "Done"
+echo
+
+echo "$(date +"%H:%M:%S"): Restoring backup DB..."
+
+if [ "${databaseSystem,,}" = "mysql" ] || [ "${databaseSystem,,}" = "mariadb" ]; then
+	mysql -h localhost -u "${dbUser}" -p"${dbPassword}" "${nextcloudDatabase}" < "${currentRestoreDir}/${fileNameBackupDb}"
+elif [ "${databaseSystem,,}" = "postgresql" ] || [ "${databaseSystem,,}" = "pgsql" ]; then
+	sudo -u postgres psql "${nextcloudDatabase}" < "${currentRestoreDir}/${fileNameBackupDb}"
+fi
+
+echo "Done"
+echo
+
+#
+# Start web server
+#
+echo "$(date +"%H:%M:%S"): Starting web server..."
+systemctl start "${webserverServiceName}"
+echo "Done"
+echo
+
+#
+# Set directory permissions
+#
+echo "$(date +"%H:%M:%S"): Setting directory permissions..."
+chown -R "${webserverUser}":"${webserverUser}" "${nextcloudFileDir}"
+chown -R "${webserverUser}":"${webserverUser}" "${nextcloudDataDir}"
+
+if [ ! -z "${nextcloudLocalExternalDataDir+x}" ] ; then
+    chown -R "${webserverUser}":"${webserverUser}" "${nextcloudLocalExternalDataDir}"
+fi
+
+echo "Done"
+echo
+
+#
+# Disable maintenance mode
+#
+echo "$(date +"%H:%M:%S"): Switching off maintenance mode..."
+sudo -u "${webserverUser}" php ${nextcloudFileDir}/occ maintenance:mode --off
+echo "Done"
+echo
+
+#
+# Update the system data-fingerprint (see https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/occ_command.html#maintenance-commands-label)
+#
+echo "$(date +"%H:%M:%S"): Updating the system data-fingerprint..."
+sudo -u "${webserverUser}" php ${nextcloudFileDir}/occ maintenance:data-fingerprint
+echo "Done"
+echo
+
+echo
+echo "DONE!"
+echo "$(date +"%H:%M:%S"): Backup ${restore} successfully restored."

BIN
dev/provisioning/ansible/roles/nextcloud/files/pexels-jaymantri-5439.jpg


+ 20 - 14
dev/provisioning/ansible/roles/nextcloud/tasks/main.yml

@@ -31,7 +31,7 @@
 - name: Main... Show Selinux variable
   debug: var=ansible_selinux
 
-- include_tasks: "prep_os/{{ ansible_os_family }}.yml"
+- include_tasks: "prep_os/{{ ansible_distribution }}.yml"
 
 - name: Main... Create shared directories (web & data)
   file:
@@ -46,8 +46,8 @@
   ignore_errors: yes
 
 - name: Main... Mount shared directories
-  include_tasks: "nc_storage.yml"
-  when: (nb_web_servers | int) > 1
+  include_tasks: "nc_gluster.yml"
+  when: "(nb_web_servers | int) > 1 and 'gluster_servers' in groups.keys()"
 
 - name: Main... Check if Nextcloud is already in the web repo
   stat:
@@ -70,8 +70,8 @@
     state: started
 
 - name: Main... Check Nextcloud status
+  become: yes
   become_user: "{{ nextcloud_websrv_user }}"
-  become: true
   shell: "{{ php_bin }} occ status --output=json"
   args:
     chdir: "{{ http_webroot }}/nextcloud"
@@ -90,6 +90,10 @@
   delegate_to: "{{ main_web_server }}"
   run_once: true
 
+- name: Main... Loading Ceph object primary storage
+  include_tasks: "nc_ceph.yml"
+  when: "'ceph_servers' in groups.keys() and objectstore_s3_install"
+
 - name: Main... Install and deploy Nextcloud if needed
   include_tasks: nc_install.yml
   args:
@@ -117,14 +121,14 @@
   set_fact:
     trusted_dom: "{{ list_zero + list_one + list_two }}"
 
-- name: Main... Set a local redirect name
-  set_fact:
-    redirect_name: "{{ main_web_server }}"
-
-- name: Main... Set a global redirect name if loadbalancing
-  set_fact:
-    redirect_name: "{{ nc_global_name | default(nc.test) }}"
-  when: groups['lbal_servers'] is defined and (groups['lbal_servers']|length>0)
+      #- name: Main... Set a local redirect name
+      #  set_fact:
+      #    redirect_name: "{{ main_web_server }}"
+      #
+      #- name: Main... Set a global redirect name if loadbalancing
+      #  set_fact:
+      #    redirect_name: "{{ nc_global_name | default(nc.test) }}"
+      #  when: groups['lbal_servers'] is defined and (groups['lbal_servers']|length>0)
 
 - name: Main... Set Trusted Local Domain
   become_user: "{{ nextcloud_websrv_user }}"
@@ -146,8 +150,10 @@
   args:
     chdir: "{{ http_webroot }}/nextcloud"
   with_items:
-    - { name: 'overwritehost', value: '{{ redirect_name }}' }
-    - { name: 'overwrite.cli.url', value: 'https://{{ redirect_name }}' }
+    #- { name: 'overwritehost', value: '{{ redirect_name }}' }
+    #- { name: 'overwrite.cli.url', value: 'https://{{ redirect_name }}' }
+    - { name: 'overwritehost', value: '{{ nc_global_name }}' }
+    - { name: 'overwrite.cli.url', value: 'https://{{ nc_global_name }}' }
   delegate_to: "{{ main_web_server }}"
   run_once: true
 

+ 8 - 0
dev/provisioning/ansible/roles/nextcloud/tasks/nc_ceph.yml

@@ -0,0 +1,8 @@
+---
+- name: Ceph... Set object storage setup
+  template:
+    dest: "{{ http_webroot }}/nextcloud/config/storage.config.php"
+    src: objectstore_s3_config.php.j2
+    owner: "{{ nextcloud_websrv_user }}"
+    group: "{{ nextcloud_websrv_group }}"
+    mode: 0640

+ 0 - 0
dev/provisioning/ansible/roles/nextcloud/tasks/nc_storage.yml → dev/provisioning/ansible/roles/nextcloud/tasks/nc_gluster.yml


+ 0 - 49
dev/provisioning/ansible/roles/nextcloud/tasks/nc_multiple.yml

@@ -1,49 +0,0 @@
----
-#########
-# Run command line installation.
-# the web server must be running by now in order to launch the installation
-
-- name: Install... Removing possibly old or incomplete config.php
-  file:
-    path: "{{ http_webroot }}/nextcloud/config/config.php"
-    state: absent
-
-- name: Install... Create custom_apps directory
-  file:
-    path: "{{ http_webroot }}/nextcloud/custom_apps"
-    state: directory
-    owner: "{{ nextcloud_websrv_user }}"
-    group: "{{ nextcloud_websrv_group }}"
-    mode: 0770
-
-- name: Install... Create data directory
-  file:
-    path: "{{ nc_data_dir }}"
-    state: directory
-    owner: "{{ nextcloud_websrv_user }}"
-    group: "{{ nextcloud_websrv_group }}"
-    mode: 0770
-
-- name: Install... Mount NFS directory (or not)
-  mount:
-    src: "{{ nfs_server }}:{{ nfs_path }}"
-    path: "{{ nc_data_dir }}"
-    state: mounted
-    fstype: nfs 
-    opts: nosharecache,context="system_u:object_r:httpd_sys_rw_content_t:s0"
-  when: nfs_data_dir
-
-- name: Install... First setup Nextcloud
-  become_user: "{{ nextcloud_websrv_user }}"
-  become: true
-  shell: "{{ php_bin }} occ maintenance:install --database=mysql --database-host={{ db_host }} --database-name={{ nc_db_name }} --database-user={{ nc_db_user }} --database-pass={{ nc_db_password }} --admin-user={{ nc_admin_name }} --admin-pass={{ nc_admin_pwd }} --data-dir={{ nc_data_dir }}"
-  args:
-    chdir: "{{ http_webroot }}/nextcloud"
-    creates: "{{ http_webroot }}/nextcloud/config/config.php"
-  register: setup_nc
-  when: "numbertables.stdout|int == 0"
-
-- name: Install... Removing possibly sample config
-  file:
-    path: "{{ http_webroot }}/nextcloud/config/config.sample.php"
-    state: absent

+ 30 - 0
dev/provisioning/ansible/roles/nextcloud/tasks/nc_setup.yml

@@ -8,6 +8,15 @@
     group: "{{ nextcloud_websrv_group }}"
     mode: 0640
 
+- name: Setup... Set background image
+  copy:
+    dest: "{{ http_webroot }}/nextcloud/apps/theming/img/background/pexels-jaymantri-5439.jpg"
+    src: files/pexels-jaymantri-5439.jpg
+    owner: "{{ nextcloud_websrv_user }}"
+    group: "{{ nextcloud_websrv_group }}"
+    mode: 0640
+  when: NEXTCLOUD_VERSION|string is version('25.0.1', '>=')
+
 - name: Setup... Set custom_apps config for Nextcloud
   copy:
     dest: "{{ http_webroot }}/nextcloud/config/apps.config.php"
@@ -86,9 +95,22 @@
     - "config:system:set activity_expire_days --value='30'"
     - "config:system:set simpleSignUpLink.shown --type=bool --value=false"
     #- "config:system:set share_folder --value='/Shared'"
+    - "theming:config url 'https://nextcloud.cism.ucl.ac.be'"
   loop_control:
     pause: 2
 
+- name: Setup... Applying recent settings
+  become_user: "{{ nextcloud_websrv_user }}"
+  become: true
+  shell: "{{ php_bin }} occ {{ item }}"
+  args:
+    chdir: "{{ http_webroot }}/nextcloud"
+  loop:
+    - "theming:config background {{ http_webroot }}/nextcloud/apps/theming/img/background/pexels-jaymantri-5439.jpg"
+  loop_control:
+    pause: 2
+  when: NEXTCLOUD_VERSION|string is version('25.0.1', '>=')
+
 - name: Setup... Set Nextcloud system settings in config.php
   become_user: "{{ nextcloud_websrv_user }}"
   become: true
@@ -106,6 +128,14 @@
     group: "{{ nextcloud_websrv_group }}"
     mode: 0640
 
+- name: Setup... Disabling filelocking because of Ceph
+  become_user: "{{ nextcloud_websrv_user }}"
+  become: true
+  shell: "{{ php_bin }} occ config:system:set filelocking.enabled --value=false"
+  args:
+    chdir: "{{ http_webroot }}/nextcloud"
+  when: "'ceph_servers' in groups.keys() or objectstore_s3_install"
+
 - name: Setup... Install Nextcloud Apps
   become_user: "{{ nextcloud_websrv_user }}"
   become: true

+ 61 - 0
dev/provisioning/ansible/roles/nextcloud/templates/NextcloudBackupRestore.conf.j2

@@ -0,0 +1,61 @@
+# Configuration for Nextcloud-Backup-Restore scripts
+
+# TODO: The main backup directory
+backupMainDir='/backup/nextcloud'
+
+# TODO: Use compression for file/data dir
+# When this is the only script for backups, it is recommend to enable compression.
+# If the output of this script is used in another (compressing) backup (e.g. borg backup),
+# you should probably disable compression here and only enable compression of your main backup script.
+useCompression=true
+
+# TODO: The bare tar command for using compression during backup.
+# Use 'tar -cpzf' if you want to use gzip compression.
+compressionCommand='tar -I pigz -cpf'
+
+# TODO: The bare tar command for using compression while restoring.
+# Use 'tar -xmpzf' if you want to use gzip compression.
+extractCommand='tar -I pigz -xmpf'
+
+# TODO: File names for backup files
+fileNameBackupFileDir='nextcloud-filedir.tar.gz'
+fileNameBackupDataDir='nextcloud-datadir.tar.gz'
+fileNameBackupExternalDataDir=''
+fileNameBackupDb='nextcloud-db.sql'
+
+# TODO: The directory of your Nextcloud installation (this is a directory under your web root)
+nextcloudFileDir='/var/www/html/nextcloud'
+
+# TODO: The directory of your Nextcloud data directory (outside the Nextcloud file directory)
+# If your data directory is located under Nextcloud's file directory (somewhere in the web root),
+# the data directory should not be a separate part of the backup
+nextcloudDataDir='/srv/data'
+
+# TODO: The directory of your Nextcloud's local external storage.
+# Uncomment if you use local external storage.
+#nextcloudLocalExternalDataDir='/var/nextcloud_external_data'
+
+# TODO: The service name of the web server. Used to start/stop web server (e.g. 'systemctl start <webserverServiceName>')
+webserverServiceName='httpd'
+
+# TODO: Your web server user
+webserverUser='apache'
+
+# TODO: The name of the database system (one of: mysql, mariadb, postgresql)
+databaseSystem='mysql'
+
+# TODO: Your Nextcloud database name
+nextcloudDatabase='nextcloudb'
+
+# TODO: Your Nextcloud database user
+dbUser='nextcloudb'
+
+# TODO: The password of the Nextcloud database user
+dbPassword='secret'
+
+# TODO: The maximum number of backups to keep (when set to 0, all backups are kept)
+maxNrOfBackups=10
+
+# TODO: Setting to include/exclude the backup directory of the Nextcloud updater
+# Set to true in order to include the backups of the Nextcloud updater
+includeUpdaterBackups=false

+ 17 - 0
dev/provisioning/ansible/roles/nextcloud/templates/objectstore_s3_config.php.j2

@@ -0,0 +1,17 @@
+<?php
+$CONFIG = array (
+  'objectstore' => [
+    'class' => '\\OC\\Files\\ObjectStore\\S3',
+    'arguments' => [
+      'bucket' => '{{ objectstore_s3_bucket_name | default('nextcloud') }}',
+      'autocreate' => true,
+      'key'    => '{{ objectstore_s3_key }}',
+      'secret' => '{{ objectstore_s3_secret }}',
+      'hostname' => '{{ objectstore_s3_hostname | default('s3.amazonaws.com') }}',
+      'port' => '{{ objectstore_s3_port | default('') }}',
+      'use_ssl' => {{ objectstore_s3_use_ssl | default('true') }},
+      'region' => '{{ objectstore_s3_region | default('us-east-1') }}',
+      'use_path_style' => {{ objectstore_s3_use_path_style | default('true') }}
+    ],
+  ],
+);

+ 0 - 1
dev/provisioning/ansible/roles/nextcloud/templates/redis.config.php.j2

@@ -3,7 +3,6 @@
 {% else %}
 <?php
 $CONFIG = array (
-  'filelocking.enabled' => true,
   'memcache.locking' => '\OC\Memcache\Redis',
   'redis' => array (
     'host' => '{{ redis_host }}',

+ 21 - 0
dev/provisioning/ansible/roles/nextcloud/vars/ldap_CISM.yml

@@ -0,0 +1,21 @@
+---
+
+nc_homeFolderNamingRule: "attr:uid"
+nc_ldapBackupHost: "ldaps://vldapcism2.cism.ucl.ac.be"
+nc_ldapBackupPort: 636
+nc_ldapBase: "dc=cism,dc=cluster"
+nc_ldapBaseGroups: "dc=cism,dc=cluster"
+nc_ldapBaseUsers: "dc=cism,dc=cluster"
+nc_ldapConfigurationActive: 1
+nc_ldapEmailAttribute: "mail"
+nc_ldapExpertUsernameAttr: "uid"
+nc_ldapGroupFilter: "(&(|(objectclass=posixGroup)))"
+nc_ldapGroupFilterObjectclass: "posixGroup"
+nc_ldapGroupMemberAssocAttr: "gidNumber"
+nc_ldapHost: "ldaps://vldapcism1.cism.ucl.ac.be"
+nc_ldapLoginFilter: "(&(|(objectclass=posixAccount))(uid=%uid))"
+nc_ldapPort: 636
+nc_ldapUserDisplayName: "cn"
+nc_ldapUserFilter: "(|(objectclass=posixAccount))"
+nc_ldapUserFilterMode: 1
+nc_ldapUserFilterObjectclass: "posixAccount"

+ 21 - 0
dev/provisioning/ansible/roles/nextcloud/vars/main.yml

@@ -15,3 +15,24 @@ nextcloud_max_upload_size_in_bytes: "{{ nextcloud_max_upload_size | human_to_byt
 # load configurations references
 os_config_ref: "{{ lookup('ansible.builtin.template', [role_path,'defaults','os_config_ref.yml']|join('/')) | from_yaml }}"
 php_config_ref: "{{ lookup('ansible.builtin.template', [role_path,'defaults','php_config_ref.yml']|join('/')) | from_yaml }}"
+
+# Default LDAP
+nc_homeFolderNamingRule: ""
+nc_ldapBackupHost: ""
+nc_ldapBackupPort: 636
+nc_ldapBase: ""
+nc_ldapBaseGroups: ""
+nc_ldapBaseUsers: ""
+nc_ldapConfigurationActive: 0
+nc_ldapEmailAttribute: ""
+nc_ldapExpertUsernameAttr: ""
+nc_ldapGroupFilter: ""
+nc_ldapGroupFilterObjectclass: ""
+nc_ldapGroupMemberAssocAttr: "gidNumber"
+nc_ldapHost: ""
+nc_ldapLoginFilter: ""
+nc_ldapPort: 636
+nc_ldapUserDisplayName: ""
+nc_ldapUserFilter: ""
+nc_ldapUserFilterMode: 0
+nc_ldapUserFilterObjectclass: ""

+ 5 - 0
dev/provisioning/ansible/roles/nfs/defaults/main.yml

@@ -0,0 +1,5 @@
+---
+nfs_exports: []
+
+nfs_rpcbind_state: started
+nfs_rpcbind_enabled: true

+ 3 - 0
dev/provisioning/ansible/roles/nfs/handlers/main.yml

@@ -0,0 +1,3 @@
+---
+- name: reload nfs
+  command: 'exportfs -ra'

+ 36 - 0
dev/provisioning/ansible/roles/nfs/tasks/main.yml

@@ -0,0 +1,36 @@
+---
+# Include variables and define needed variables.
+- name: Include OS-specific variables.
+  include_vars: "{{ ansible_os_family }}.yml"
+
+- name: Include overrides specific to Fedora.
+  include_vars: Fedora.yml
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_distribution == "Fedora"
+
+# Setup/install tasks.
+- include_tasks: setup/RedHat.yml
+  when: ansible_os_family == 'RedHat'
+
+- include_tasks: setup/Debian.yml
+  when: ansible_os_family == 'Debian'
+
+- name: Ensure directories to export exist
+  file:  # noqa 208
+    path: "{{ item.strip().split()[0] }}"
+    state: directory
+  with_items: "{{ nfs_exports }}"
+
+- name: Copy exports file.
+  template:
+    src: exports.j2
+    dest: /etc/exports
+    owner: root
+    group: root
+    mode: 0644
+  notify: reload nfs
+
+- name: Ensure nfs is running.
+  service: "name={{ nfs_server_daemon }} state=started enabled=yes"
+  when: nfs_exports|length

+ 7 - 0
dev/provisioning/ansible/roles/nfs/tasks/setup/Debian.yml

@@ -0,0 +1,7 @@
+---
+- name: Ensure NFS utilities are installed.
+  apt:
+    name:
+      - nfs-common
+      - nfs-kernel-server
+    state: present

+ 9 - 0
dev/provisioning/ansible/roles/nfs/tasks/setup/RedHat.yml

@@ -0,0 +1,9 @@
+---
+- name: Ensure NFS utilities are installed.
+  package: name=nfs-utils state=present
+
+- name: Ensure rpcbind is running as configured.
+  service:
+    name: rpcbind
+    state: "{{ nfs_rpcbind_state }}"
+    enabled: "{{ nfs_rpcbind_enabled }}"

+ 9 - 0
dev/provisioning/ansible/roles/nfs/tasks/setup/Rocky.yml

@@ -0,0 +1,9 @@
+---
+- name: Ensure NFS utilities are installed.
+  package: name=nfs-utils state=present
+
+- name: Ensure rpcbind is running as configured.
+  service:
+    name: rpcbind
+    state: "{{ nfs_rpcbind_state }}"
+    enabled: "{{ nfs_rpcbind_enabled }}"

+ 13 - 0
dev/provisioning/ansible/roles/nfs/templates/exports.j2

@@ -0,0 +1,13 @@
+# /etc/exports: the access control list for filesystems which may be exported
+#   to NFS clients.  See exports(5).
+#
+# Example for NFSv2 and NFSv3:
+# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
+#
+# Example for NFSv4:
+# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
+# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
+#
+{% for export in nfs_exports %}
+{{ export }}
+{% endfor %}

+ 2 - 0
dev/provisioning/ansible/roles/nfs/vars/Debian.yml

@@ -0,0 +1,2 @@
+---
+nfs_server_daemon: nfs-kernel-server

+ 2 - 0
dev/provisioning/ansible/roles/nfs/vars/Fedora.yml

@@ -0,0 +1,2 @@
+---
+nfs_server_daemon: nfs-server

+ 2 - 0
dev/provisioning/ansible/roles/nfs/vars/RedHat.yml

@@ -0,0 +1,2 @@
+---
+nfs_server_daemon: nfs-server

+ 2 - 0
dev/provisioning/ansible/roles/nfs/vars/Rocky.yml

@@ -0,0 +1,2 @@
+---
+nfs_server_daemon: nfs-server

+ 2 - 2
dev/provisioning/ansible/roles/proxysql/tasks/main.yml

@@ -7,7 +7,7 @@
     my_service: {"name": "proxysql.service", "source": "systemd", "state": "unknown", "status": "disabled"}
 
 - name: Include OS specific variables.
-  include_vars: "{{ ansible_os_family }}.yml"
+  include_vars: "{{ ansible_distribution }}.yml"
 
 - name: collect facts about system services
   service_facts:
@@ -23,7 +23,7 @@
     var: my_service
 
 - name: Install proxySQL
-  include_tasks: "setup/{{ ansible_os_family }}.yml"
+  include_tasks: "setup/{{ ansible_distribution }}.yml"
 
 - name: Install proxysql scripts
   copy:

+ 1 - 1
dev/provisioning/ansible/roles/redis/tasks/main.yml

@@ -10,7 +10,7 @@
         redis_list_servers: "{{ groups['test_servers'] | list }}"
 
 # include os specific tasks
-- include_tasks: "setup/{{ ansible_os_family }}.yml"
+- include_tasks: "setup/{{ ansible_distribution }}.yml"
 
 - name: Ensure Redis configuration dir exists.
   file:

+ 3 - 2
dev/provisioning/ansible/roles/web_php/tasks/main.yml

@@ -7,6 +7,7 @@
 - name: Main... collect facts about system services
   service_facts:
   register: services_state
+  no_log: true
 
 - name: Main... Set nb_web_servers based on web_servers group
   set_fact:
@@ -28,9 +29,9 @@
     var: my_redis_service
   when: ('%s.service' % (redis_daemon)) in ansible_facts.services.keys()
 
-- include_tasks: "web/{{ ansible_os_family }}.yml"
+- include_tasks: "web/{{ ansible_distribution }}.yml"
 
-- include_tasks: "php/{{ ansible_os_family }}.yml"
+- include_tasks: "php/{{ ansible_distribution }}.yml"
 
   #- include_tasks: "ssl.yml"
 

+ 1 - 0
dev/provisioning/ansible/roles/web_php/tasks/web/Rocky.yml

@@ -53,6 +53,7 @@
     proto: tcp
     setype: http_port_t
     state: present
+  when: ansible_selinux.status == "enabled"
 
 - name: Start {{ http_service_name }} service
   service:

+ 3 - 3
dev/provisioning/bash/common.sh

@@ -4,14 +4,14 @@ if [[ -z "$1" ]]; then
   echo "!! box variable not set !!"
   exit 1
 fi
-if [[ $1 == centos* ]]; then
+if [[ "$1" == *"centos"* ]]; then
    pkgmanager="yum"
    yum install -y epel-release
    yum install -y python3 dnf policycoreutils-python python2-cryptography libselinux-python3  
    yum install -y epel-release
-elif [[ $1 == ubuntu* ]]; then
+elif [[ "$1" == *"ubuntu"* ]]; then
   pkgmanager="apt-get"
-elif [[ $1 == rocky* ]]; then
+elif [[ "$1" == *"rocky"* ]]; then
   pkgmanager="dnf"
   dnf -y install epel-release
   dnf -y install python3 policycoreutils-python-utils python3-cryptography libselinux-python3

+ 18 - 0
dev/provisioning/bash/install-docker.sh

@@ -0,0 +1,18 @@
+# install-docker.sh
+
+#!/bin/sh
+
+set -o errexit
+set -o nounset
+
+IFS=$(printf '\n\t')
+
+apt remove --yes docker docker-engine docker.io containerd runc || true
+apt update
+apt --yes --no-install-recommends install apt-transport-https ca-certificates
+wget --quiet --output-document=- https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+add-apt-repository --yes "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release --codename --short) stable"
+apt update
+apt --yes --no-install-recommends install docker-ce docker-ce-cli containerd.io
+systemctl enable docker
+printf '\nDocker installed successfully\n\n'

+ 15 - 0
dev/tools/box-metadata.json

@@ -0,0 +1,15 @@
+{
+  "name" : "rockylinux/8",
+  "description" : "Rocky Linux 8 7.0.0 Bugfix",
+  "versions" : [
+    {
+      "version" : "7.0.1-20221213.0",
+      "providers" : [
+        {
+          "name" : "virtualbox",
+          "url" : "http://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-Vagrant-Vbox-8.7-20221213.0.x86_64.box"
+        }
+      ]
+    }
+  ]
+}

+ 86 - 0
dev/tools/nextcloud-S3-migration/README.md

@@ -0,0 +1,86 @@
+# nextcloud local S3 migration
+
+Script for migrating Nextcloud primary storage from local to S3 storage
+:floppy_disk: to :cloud:
+
+Fork from [mrAceT](https://github.com/mrAceT/nextcloud-S3-local-S3-migration) 
+> Revision `main` - 4ab29ef
+
+## local to S3
+
+It will transfer files from **local** based primary storage to a **S3** primary storage.
+
+Before you start, it is probably wise to set $DO_FILES_CLEAN (occ files:cleanup)
+and $DO_FILES_SCAN (occ files:scan --all) to '1' once, let the 'Nextcloud' do some checking.. then you'll start out as clean as possible
+
+0. make sure that the Nextcloud cron job is disabled and make sure that your local data is sufficiently large
+
+1. go to the nextcloud root folder and install composer
+
+```bash
+wget https://getcomposer.org/installer
+mv installer composer-setup.php
+mkdir bin
+php composer-setup.php --install-dir /var/www/html/nextcloud/bin
+chown -R apache:apache bin
+rm -f composer-setup.php
+chmod 755 bin/*
+```
+
+2. install 'aws/aws-sdk-php'
+
+```bash
+runuser -u apache -- ./bin/composer.phar require aws/aws-sdk-php
+```
+
+3. place 'storage.config.php', 'localtos3.php' and 'tests3.php' in the nextcloud root folder (and set your S3 credentials!)
+4. set & check all the config variables in the beginning of the scripts!
+5. check your S3 config 
+
+```bash
+sudo -u apache php81 -d memory_limit=1024M tests3.php
+```
+
+6. start with the highest $TEST => 2 (complete dry run, just checks and dummy uploads etc. NO database changes whatsoever!)
+
+```bash
+sudo -u apache php81 -d memory_limit=1024M localtos3.php
+```
+
+7. set $TEST to a 'small test user', upload the data to S3 for only that user (NO database changes whatsoever!)
+8. set $TEST to 1 and run the script yet again, upload (**and check**) all the data to S3 (NO database changes whatsoever!)
+9. set $TEST to 0 and run the script again (this is LIVE, nextcloud will be set into maintenance:mode --on while working ! **database changes!**)
+
+> **DO NOT** skip ahead and go live ($TEST=0) as the first step.. then your downtime will be very long!
+
+With performing 'the move' at step 8 you will decrease the downtime (with maintenance mode:on) immensely!
+This because the script will first check if it already has uploaded the latest file, then it can skip to the next and does not need to (slowly) upload it to your S3 bucket!
+With a little luck the final run (with $TEST=0) can be done within a minute!
+
+> **NOTE** step 8 will take a very long time when you have a lot of data to upload!
+
+If everything worked you might want to delete the data in data folder.
+Also you probably want to delete this script (and the 'storage.config.php') after running it.
+If all went as it should the config data in 'storage.config.php' is included in the 'config/config.php'. Then the 'storage.config.php' can also be removed from your config folder (no sense in having a double config)
+
+## S3 sanity check!
+
+When you
+
+1. have S3 as your primary storage
+2. set $TEST to 0
+3. **optionally** set $SET_MAINTENANCE to 0
+4. (have set/checked all the other variables..)
+
+Then the script 'localtos3.php' will:
+- look for entries in S3 and not in the database and vice versa **and remove them**.
+This can happen sometimes upon removing an account, preview files might not get removed.. stuff like that..
+
+- check for canceled uploads.
+Inspired upon [otherguy/nextcloud-cleanup](https://github.com/otherguy/nextcloud-cleanup/blob/main/clean.php). I have not had this problem, so can not test.. => check only!
+
+- preview cleanup.
+Removes previews of files that no longer exist.
+There is some initial work for clearing previews.. that is a work in progress, use at your own risk!
+
+The script will do the "sanity check" when migrating also (we want a good and clean migration, don't we? ;)

+ 895 - 0
dev/tools/nextcloud-S3-migration/localtos3.php

@@ -0,0 +1,895 @@
+<?php
+/* *********************************************************************************** */
+/*        2023 code created by Eesger Toering / knoop.frl / geoarchive.eu              */
+/*        GitHub: https://github.com/mrAceT/nextcloud-S3-local-S3-migration            */
+/*     Like the work? You'll be surprised how much time goes into things like this..   */
+/*                            be my hero, support my work,                             */
+/*                     https://paypal.me/eesgertoering                                 */
+/*                     https://www.geef.nl/en/donate?action=15544                      */
+/* *********************************************************************************** */
+
+# best practice: run the script as the cloud-user!!
+# sudo -u apache php81 -d memory_limit=1024M /var/www/html/nextcloud/localtos3.php
+
+# runuser -u apache -- composer require aws/aws-sdk-php
+use Aws\S3\S3Client;
+
+# uncomment this for large file uploads (Amazon advises this for 100Mb+ files)
+#use Aws\S3\MultipartUploader;
+#$MULTIPART_THRESHOLD = 500; #Megabytes
+
+echo "\n#########################################################################################".
+     "\n Migration tool for Nextcloud local to S3 version 0.35".
+     "\n".
+     "\n Reading config...";
+
+$PREVIEW_MAX_AGE = 0; // max age (days) of preview images (EXPERIMENTAL! 0 = no del)
+$PREVIEW_MAX_DEL = 0.005; // max amount of previews to delete at a time (when < 1 & > 0 => percentage! )..
+
+// Note: Preferably use absolute path without trailing directory separators
+$PATH_BASE      = '/var/www/html/nextcloud'; // Path to the base of the main Nextcloud directory
+
+$PATH_NEXTCLOUD = $PATH_BASE; // Path of the public Nextcloud directory
+
+$PATH_BACKUP    = $PATH_BASE.'/bak'; // Path for backup of MySQL database (you must create it yourself..)
+
+$OCC_BASE       = 'php81 -d memory_limit=1024M '.$PATH_NEXTCLOUD.'/occ ';
+// don't forget this one --. (if you don't run the script as the 'clouduser', see first comment at the top)
+#$OCC_BASE       = 'sudo -u apache php81 -d memory_limit=1024M '.$PATH_NEXTCLOUD.'/occ ';
+
+$TEST = '2'; //'admin';//'appdata_oczvcie123w4';
+// set to 0 for LIVE!!
+// set to 1 for all data : NO db modifications, with file modifications/uploads/removal
+// set to user name for single user (migration) test
+// set to 2 for complete dry run
+
+$SET_MAINTENANCE = 1; // only in $TEST=0 Nextcloud will be put into maintenance mode
+// ONLY when migration is all done you can set this to 0 for the S3-consistency checks
+
+$SHOWINFO = 1; // set to 0 to force much less info (while testing)
+
+$SQL_DUMP_USER = ''; // leave both empty if nextcloud user has enough rights..
+$SQL_DUMP_PASS = '';
+
+$CONFIG_OBJECTSTORE = dirname(__FILE__).'/storage.config.php';
+
+# It is probably wise to set the two vars below to '1' once, let the 'Nextcloud' do some checking..
+$DO_FILES_CLEAN = 0; // perform occ files:cleanup    | can take a while on large accounts (should not be necessary but cannot hurt / not working while in maintenance.. )
+$DO_FILES_SCAN  = 0; // perform occ files:scan --all | can take a while on large accounts (should not be necessary but cannot hurt / not working while in maintenance.. )
+
+echo "\n".
+     "\n#########################################################################################".
+     "\nSetting up local migration to S3 (sync)...\n";
+
+// Autoload
+require_once(dirname(__FILE__).'/vendor/autoload.php');
+
+echo "\nfirst load the nextcloud config...";
+include($PATH_NEXTCLOUD.'/config/config.php');
+if (!empty($CONFIG['objectstore'])) {
+  if ($CONFIG_OBJECTSTORE == $PATH_NEXTCLOUD.'/config/config.php') {
+    echo "\nS3 config found in \$PATH_NEXTCLOUD system config.php => same as \$CONFIG_OBJECTSTORE !";
+  } else {
+    echo "\nS3 config found in \$PATH_NEXTCLOUD system config.php => \$CONFIG_OBJECTSTORE not used! ($CONFIG_OBJECTSTORE)";
+  }
+  $CONFIG_OBJECTSTORE = ''; //no copy!
+} else {
+  echo "\nS3 NOT configured in config.php, using \$CONFIG_OBJECTSTORE";
+  if (is_string($CONFIG_OBJECTSTORE) && file_exists($CONFIG_OBJECTSTORE)) {
+    $CONFIG_MERGE = $CONFIG;
+    include($CONFIG_OBJECTSTORE);
+    $CONFIG = array_merge($CONFIG_MERGE,$CONFIG);
+  }
+  else if (is_array($CONFIG_OBJECTSTORE)) {
+    $CONFIG['objectstore'] = $CONFIG_OBJECTSTORE;
+  } else {
+    echo "\nERROR: var \$CONFIG_OBJECTSTORE is not configured (".gettype($CONFIG_OBJECTSTORE)." / $CONFIG_OBJECTSTORE)\n\n";
+    die;
+  }
+}
+$PATH_DATA = preg_replace('/\/*$/','',$CONFIG['datadirectory']);
+
+echo "\nconnect to sql-database...";
+// Database setup
+$mysqli = new mysqli($CONFIG['dbhost'], $CONFIG['dbuser'], $CONFIG['dbpassword'], $CONFIG['dbname']);
+if ($CONFIG['mysql.utf8mb4']) {
+  $mysqli->set_charset('utf8mb4');
+}
+
+################################################################################ checks #
+$LOCAL_STORE_ID = 0;
+if ($result = $mysqli->query("SELECT * FROM `oc_storages` WHERE `id` = 'local::$PATH_DATA/'")) {
+  if ($result->num_rows>1) {
+    echo "\nERROR: Multiple 'local::$PATH_DATA', it's an accident waiting to happen!!\n";
+    die;
+  }
+  else if ($result->num_rows == 1) {
+    echo "\nFOUND 'local::$PATH_DATA', good. ";
+    $row = $result->fetch_assoc();
+    $LOCAL_STORE_ID = $row['numeric_id']; // for creative rename command..
+    echo "\nThe local store  id is:$LOCAL_STORE_ID";
+  } else {
+    echo "\nWARNING: no 'local::$PATH_DATA' found, therefor no sync local data > S3!\n";
+  }
+}
+$OBJECT_STORE_ID = 0;
+if ($result = $mysqli->query("SELECT * FROM `oc_storages` WHERE `id` LIKE 'object::store:amazon::".$CONFIG['objectstore']['arguments']['bucket']."'")) {
+  if ($result->num_rows>1) {
+    echo "\nMultiple 'object::store:amazon::".$CONFIG['objectstore']['arguments']['bucket']."' clean this up, it's an accident waiting to happen!!\n\n";
+    die;
+  }
+  else if ($result->num_rows == 0) {
+    if (empty($CONFIG['objectstore'])) {
+      echo "\nERROR: No 'object::store:' & NO S3 storage defined\n\n";
+      die;
+    } else {
+      echo "\nNOTE: No 'object::store:' > S3 storage  = defined\n\n";
+      echo "\n Upon migration local will be renamed to object::store";
+    }
+  }
+  else {
+    echo "\nFOUND 'object::store:amazon::".$CONFIG['objectstore']['arguments']['bucket']."', OK";
+    $row = $result->fetch_assoc();
+    $OBJECT_STORE_ID = $row['numeric_id']; // for creative rename command..
+    echo "\nThe object store id is:$OBJECT_STORE_ID";
+    
+    $result = $mysqli->query("SELECT `fileid` FROM `oc_filecache` WHERE `storage` = ".$OBJECT_STORE_ID);
+    if ( $result->num_rows > 0 ) {
+      echo "\n\nWARNING: if this is for a full migration remove all data with `storage` = $OBJECT_STORE_ID in your `oc_filecache` !!!!\n";
+    }
+    
+  }
+}
+$result->free_result();
+
+echo "\n".
+     "\n######################################################################################### ".$TEST;
+if (empty($TEST) ) {
+  echo "\n\nNOTE: THIS IS THE REAL THING!!\n";
+} else {
+  echo empty($TEST)          ? '' : "\nWARNING: you are in test mode (".$TEST.")";
+}
+echo "\nBase init complete, continue?";
+$getLine = '';
+while ($getLine == ''): $getLine = fgets( fopen("php://stdin","r") ); endwhile;
+
+echo "\n######################################################################################### ";
+
+if ($DO_FILES_CLEAN) {
+  echo "\nRunning cleanup (should not be necessary but cannot hurt)";
+  echo occ($OCC_BASE,'files:cleanup');
+}
+if ($DO_FILES_SCAN) {
+  echo "\nRunning scan (should not be necessary but cannot hurt)";
+  echo occ($OCC_BASE,'files:scan --all');
+}
+
+if (empty($TEST)) {
+  if ($SET_MAINTENANCE) { // maintenance mode
+    $process = occ($OCC_BASE,'maintenance:mode --on');
+    echo $process;
+    if (strpos($process, "\nMaintenance mode") == 0
+     && strpos($process, 'Maintenance mode already enabled') == 0) {
+      echo " could not set..  ouput command: ".$process."\n\n";
+      die;
+    }
+  }
+} else {
+  echo "\n\nNOTE: In TEST-mode, will not enter maintenance mode";
+}
+
+echo "\ndatabase backup...";
+if (!is_dir($PATH_BACKUP)) { echo "\$PATH_BACKUP folder does not exist\n"; die; }
+
+$process = shell_exec('mysqldump --host='.$CONFIG['dbhost'].
+                               ' --user='.(empty($SQL_DUMP_USER)?$CONFIG['dbuser']:$SQL_DUMP_USER).
+                               ' --password='.escapeshellcmd( empty($SQL_DUMP_PASS)?$CONFIG['dbpassword']:$SQL_DUMP_PASS ).' '.$CONFIG['dbname'].
+                               ' > '.$PATH_BACKUP . DIRECTORY_SEPARATOR . 'backup.sql');
+if (strpos(' '.strtolower($process), 'error:') > 0) {
+  echo "sql dump error\n";
+  die;
+} else {
+  echo "\n(to restore: mysql -u ".(empty($SQL_DUMP_USER)?$CONFIG['dbuser']:$SQL_DUMP_USER)." -p ".$CONFIG['dbname']." < backup.sql)\n";
+}
+
+echo "\nbackup config.php...";
+$copy = 1;
+if(file_exists($PATH_BACKUP.'/config.php')){
+  if (filemtime($PATH_NEXTCLOUD.'/config/config.php') > filemtime($PATH_BACKUP.'/config.php') ) {
+    unlink($PATH_BACKUP.'/config.php');
+  }
+  else {
+    echo 'not needed';
+    $copy = 0;
+  }
+}
+if ($copy) {
+  copy($PATH_NEXTCLOUD.'/config/config.php', $PATH_BACKUP.'/config.php');
+}
+
+echo "\nconnect to S3...";
+$bucket = $CONFIG['objectstore']['arguments']['bucket'];
+$s3 = new S3Client([
+    'version' => 'latest',
+    //'endpoint' => 'https://'.$bucket.'.'.$CONFIG['objectstore']['arguments']['hostname'],
+    'endpoint' => 'http://'.$CONFIG['objectstore']['arguments']['hostname'],
+    //'bucket' => $bucket,
+    //'bucket_endpoint' => true,
+    'region'  => $CONFIG['objectstore']['arguments']['region'],
+    'credentials' => [
+        'key' => $CONFIG['objectstore']['arguments']['key'],
+        'secret' => $CONFIG['objectstore']['arguments']['secret'],
+    ],
+    'use_path_style_endpoint' => $CONFIG['objectstore']['arguments']['use_path_style']
+]);
+
+echo "\n".
+     "\n#########################################################################################".
+     "\nSetting everything up finished ##########################################################";
+
+echo "\n".
+     "\n#########################################################################################".
+     "\nappdata preview size...";
+$PREVIEW_MAX_AGEU = 0;
+$PREVIEW_1YR_AGEU = 0;
+if ($PREVIEW_MAX_AGE > 0) {
+  echo "\nremove older then ".$PREVIEW_MAX_AGE." day".($PREVIEW_MAX_AGE>1?'s':'');
+  
+  $PREVIEW_MAX_AGEU = new DateTime(); // For today/now, don't pass an arg.
+  $PREVIEW_MAX_AGEU->modify("-".$PREVIEW_MAX_AGE." day".($PREVIEW_MAX_AGE>1?'s':''));
+  echo " > clear before ".$PREVIEW_MAX_AGEU->format( 'd-m-Y' )." (U:".$PREVIEW_MAX_AGEU->format( 'U' ).")";
+  $PREVIEW_MAX_AGEU = $PREVIEW_MAX_AGEU->format( 'U' );
+
+} else {
+  echo " (\$PREVIEW_MAX_AGE = 0 days, stats only)";
+}
+$PREVIEW_1YR_AGEU = new DateTime(); // For today/now, don't pass an arg.
+$PREVIEW_1YR_AGEU->modify("-1year");
+$PREVIEW_1YR_AGEU = $PREVIEW_1YR_AGEU->format( 'U' );
+
+$PREVIEW_NOW = [0,0];
+$PREVIEW_DEL = [0,0];
+$PREVIEW_REM = [0,0];
+$PREVIEW_1YR = [0,0];
+
+if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC`.`size`, `FC`.`storage_mtime` FROM".
+                             " `oc_filecache` as `FC`,".
+                             " `oc_storages`  as `ST`,".
+                             " `oc_mimetypes` as `MT`".
+                             " WHERE 1".
+                              " AND `FC`.`path`    LIKE 'appdata_%'".
+                              " AND `FC`.`path`    LIKE '%/preview/%'".
+#                              " AND `ST`.`id` LIKE 'object::%'".
+#                              " AND `FC`.`fileid` = '".substr($object['Key'],8)."'". # should be only one..
+
+                              " AND `ST`.`numeric_id` = `FC`.`storage`".
+                              " AND `FC`.`mimetype`   = `MT`.`id`".
+                              " AND `MT`.`mimetype`  != 'httpd/unix-directory'".
+                             " ORDER BY `FC`.`storage_mtime` ASC")) {
+  echo "\nERROR: query pos 1";
+  die;
+} else {
+  if ($PREVIEW_MAX_DEL > 0
+   && $PREVIEW_MAX_DEL < 1) {
+    $PREVIEW_MAX_DEL*= $result->num_rows;
+  }
+  while ($row = $result->fetch_assoc()) {
+    // Determine correct path
+    if (substr($row['id'], 0, 13) == 'object::user:') {
+      $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 13) . DIRECTORY_SEPARATOR . $row['path'];
+    }
+    else if (substr($row['id'], 0, 6) == 'home::') {
+      $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 6) . DIRECTORY_SEPARATOR . $row['path'];
+    } else {
+      $path = $PATH_DATA . DIRECTORY_SEPARATOR . $row['path'];
+    }
+    $user = substr($path, strlen($PATH_DATA. DIRECTORY_SEPARATOR));
+    $user = substr($user,0,strpos($user,DIRECTORY_SEPARATOR));
+
+    if ($PREVIEW_MAX_AGEU > $row['storage_mtime']
+     && $PREVIEW_MAX_DEL > 1) {
+      $PREVIEW_MAX_DEL--;
+      if (empty($TEST)) {
+        if(file_exists($path) && is_file($path)){
+          unlink($path);
+        }
+        $result_s3 =  S3del($s3, $bucket, 'urn:oid:'.$row['fileid']);
+        $mysqli->query("DELETE FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$row['fileid']);
+      } else {
+        echo "\nfileID ".$matches[2]." has a preview older then the set \$PREVIEW_MAX_AGE";
+      }
+      $PREVIEW_DEL[1] += $row['size'];
+      $PREVIEW_DEL[0]++;
+    } else {
+      if (preg_match('/\/preview\/([a-f0-9]\/[a-f0-9]\/[a-f0-9]\/[a-f0-9]\/[a-f0-9]\/[a-f0-9]\/[a-f0-9]\/)?([0-9]+)\/[^\/]+$/',$path,$matches)) {
+        #echo "check fileID".$matches[2].' ';
+        $result2 = $mysqli->query("SELECT `storage` FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$matches[2]);
+        if ($result2->num_rows == 0 ) {
+          if (empty($TEST)) {
+            if(file_exists($path) && is_file($path)){
+              unlink($path);
+            }
+            $result_s3 =  S3del($s3, $bucket, 'urn:oid:'.$row['fileid']);
+            $mysqli->query("DELETE FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$row['fileid']);
+          } else {
+            echo "\nfileID ".$matches[2]." has a preview, but the source file does not exist, would delete the preview (fileID ".$row['fileid'].")";
+          }
+          $PREVIEW_REM[0]++;
+          $PREVIEW_REM[1] += $row['size'];
+        } else {
+          if ($PREVIEW_1YR_AGEU > $row['storage_mtime'] ) {
+            $PREVIEW_1YR[1] += $row['size'];
+            $PREVIEW_1YR[0]++;
+          }
+          $PREVIEW_NOW[1] += $row['size'];
+          $PREVIEW_NOW[0]++;
+        }
+        $result2->free_result();
+      } else {
+        echo "\n\nERROR:  path format not as expected (".$row['fileid']." : $path)";
+        echo "\n\tremove the database entry..";
+        if (empty($TEST)) {
+          $mysqli->query("DELETE FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$row['fileid']);
+        }
+        else {
+          echo " ONLY with \$TEST = 0 the DB entry will be removed!";
+        }
+        echo "\n";
+      }
+    }
+    
+  }
+  $result->free_result();
+}
+
+if ($PREVIEW_DEL[0] > 0
+ || $PREVIEW_REM[0] > 0) {
+  echo "\nappdata preview size before :".sprintf('% 8.2f',($PREVIEW_NOW[1]+$PREVIEW_DEL[1])/1024/1024)." Mb\t(".($PREVIEW_NOW[0]+$PREVIEW_DEL[0])." files)";
+  echo "\nappdata preview > 1 year old:".sprintf('% 8.2f',($PREVIEW_1YR[1])/1024/1024)." Mb\t(".$PREVIEW_1YR[0]." files)";
+  echo "\nappdata preview size cleared:".sprintf('% 8.2f',($PREVIEW_DEL[1])/1024/1024)." Mb\t(".$PREVIEW_DEL[0]." files".($PREVIEW_MAX_DEL<1?' MAX DEL ':'').")";
+  echo "\nappdata preview size cleared:".sprintf('% 8.2f',($PREVIEW_DEL[1])/1024/1024)." Mb\t(".$PREVIEW_DEL[0]." files".($PREVIEW_MAX_DEL<1?' MAX DEL ':'').")";
+  echo "\nappdata preview size now    :".sprintf('% 8.2f',($PREVIEW_NOW[1])/1024/1024)." Mb\t(".$PREVIEW_NOW[0]." files";
+  if ($PREVIEW_NOW[1]+$PREVIEW_DEL[1] > 0 ) {
+    echo "/ -".floor(($PREVIEW_DEL[1]+$PREVIEW_REM[1])/($PREVIEW_NOW[1]+$PREVIEW_DEL[1])+.5)."%";
+  }
+  echo ")";
+  if (!empty($TEST)) {
+    echo "\n\nNOTE: in TEST-mode, no preview-data has been cleared!";
+  }
+} else {
+  echo "\nappdata preview size        :".sprintf('% 8.2f',($PREVIEW_NOW[1])/1024/1024)." Mb\t(".$PREVIEW_NOW[0]." files)";
+  echo "\nappdata preview > 1 year old:".sprintf('% 8.2f',($PREVIEW_1YR[1])/1024/1024)." Mb\t(".$PREVIEW_1YR[0]." files)";
+}
+
+echo "\n".
+     "\n#########################################################################################".
+     "\nread files in S3...";
+$objects = S3list($s3, $bucket);
+
+$objectIDs     = array();
+$objectIDsSize = 0;
+$users         = array();
+
+if (is_string($objects)) {
+  echo $objects; # error..
+  die;
+}
+else {
+  echo "\nObjects to process in S3: ".count($objects).' ';
+  $S3_removed = [0,0];
+  $S3_updated = [0,0];
+  $S3_skipped = [0,0];
+
+  // Init progress
+  $complete = count($objects);
+  $prev     = '';
+  $current  = 0;
+  
+  $showinfo = !empty($TEST);
+  $showinfo = $SHOWINFO ? $showinfo : 0;
+  
+  foreach ($objects as $object) {
+    $current++;
+    $infoLine = "\n".$current."  /  ".substr($object['Key'],8)."\t".$object['Key'] . "\t" . $object['Size'] . "\t" . $object['LastModified'] . "\t";
+
+    if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC`.`storage_mtime`, `FC`.`size` FROM".
+                                 " `oc_filecache` AS `FC`,".
+                                 " `oc_storages`  AS `ST`,".
+                                 " `oc_mimetypes` AS `MT`".
+                                 " WHERE 1".
+   #                              " AND st.id LIKE 'object::%'".
+                                  " AND `FC`.`fileid` = '".substr($object['Key'],8)."'". # should be only one..
+
+                                  " AND `ST`.`numeric_id` = `FC`.`storage`".
+                                  " AND `FC`.`mimetype`   = `MT`.`id`".
+                                  " AND `MT`.`mimetype`  != 'httpd/unix-directory'".
+                                 " ORDER BY `FC`.`path` ASC")) {#
+      echo "\nERROR: query pos 2";
+      die;
+    } else {
+      if ($result->num_rows>1) {
+        echo "\ndouble file found in oc_filecache, this can not be!?\n";
+        die;
+      }
+      else if ($result->num_rows == 0) { # in s3, not in db, remove from s3
+        if ($showinfo) { echo $infoLine."\nID:".$object['Key']."\ton S3, but not in oc_filecache, remove..."; }
+        if (!empty($TEST) && $TEST == 2) {
+          echo ' not removed ($TEST = 2)';
+        } else {
+          $result_s3 =  S3del($s3, $bucket, $object['Key']);
+          if ($showinfo) { echo 'S3del:'.$result_s3; }
+        }
+        $S3_removed[0]++;
+        $S3_removed[1]+=$object['Size'];
+      }
+      else { # one match, up to date?
+        $row = $result->fetch_assoc();
+
+        // Determine correct path
+        if (substr($row['id'], 0, 13) == 'object::user:') {
+        $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 13) . DIRECTORY_SEPARATOR . $row['path'];
+        }
+        else if (substr($row['id'], 0, 6) == 'home::') {
+          $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 6) . DIRECTORY_SEPARATOR . $row['path'];
+        } else {
+          $path = $PATH_DATA . DIRECTORY_SEPARATOR . $row['path'];
+        }
+        $user = substr($path, strlen($PATH_DATA. DIRECTORY_SEPARATOR));
+        $user = substr($user,0,strpos($user,DIRECTORY_SEPARATOR));
+        $users[ $user ] = 1;
+
+        $infoLine.= $user. "\t";
+
+        # just for one user? set test = appdata_oczvcie795w3 (system wil not go to maintenance nor change database, just test and copy data!!)
+        if (is_numeric($TEST) || $TEST == $user ) {
+          #echo "\n".$path."\t".$row['storage_mtime'];
+          if(file_exists($path) && is_file($path)){
+            if ($row['storage_mtime'] < filemtime($path) ) {
+              if ($showinfo) { echo $infoLine."\nID:".$object['Key']."\ton S3, but is older then local, upload..."; }
+              if (!empty($TEST) && $TEST == 2) {
+                echo ' not uploaded ($TEST = 2)';
+              } else {
+                $result_s3 =  S3put($s3, $bucket,[
+                                          'Key' => 'urn:oid:'.$row['fileid'],
+                                          #'Body'=> "Hello World!!",
+                                          'SourceFile' => $path,
+                                          'ACL' => 'private'//public-read'
+                                        ]);
+                if ($showinfo) { echo 'S3put:'.$result_s3; }
+              }
+              $S3_updated[0]++;
+              $S3_updated[1]+=$row['size'];
+            } else {
+              $objectIDs[ $row['fileid'] ] = 1;
+              $objectIDsSize+=$row['size'];
+#              if ($showinfo) { echo $infoLine."OK (".$row['fileid']." / ".(count($objectIDs)).")"; }
+            }
+          } else {
+            $objectIDs[ $row['fileid'] ] = 1;
+            $objectIDsSize+=$row['size'];
+#            if ($showinfo) { echo $infoLine."OK-S3 (".$row['fileid']." / ".(count($objectIDs)).")"; }
+          }
+        } else {
+          $S3_skipped[0]++;
+          $S3_skipped[1]+=$row['size'];
+#          if ($showinfo) { echo "SKIP (TEST=$TEST)"; }
+        }
+      }
+      // Update progress
+      $new = sprintf('%.2f',$current/$complete*100).'% (now at user '.$user.')';
+      if ($prev != $new && !$showinfo) {
+        echo str_repeat(chr(8) , strlen($prev) );
+        $new.= (strlen($prev)<=strlen($new))? '' : str_repeat(' ' , strlen($prev)-strlen($new) );
+        $prev = $new;
+        echo $prev;
+      }
+    }
+    $result->free_result();
+  }
+  if (!$showinfo) {
+    echo str_repeat(chr(8) , strlen($prev) );
+    $new = ' DONE ';
+    $new.= (strlen($prev)<=strlen($new))? '' : str_repeat(' ' , strlen($prev)-strlen($new) );
+    $prev = $new;
+    echo $prev;
+  }
+  if ($showinfo) { echo "\nNumber of objects in  S3: ".count($objects); }
+  echo "\nobjects removed from  S3: ".$S3_removed[0]   ."\t(".readableBytes($S3_removed[1]).")";
+  echo "\nobjects updated to    S3: ".$S3_updated[0]   ."\t(".readableBytes($S3_updated[1]).")";
+  echo "\nobjects skipped on    S3: ".$S3_skipped[0]   ."\t(".readableBytes($S3_skipped[1]).")";
+  echo "\nobjects in sync on    S3: ".count($objectIDs)."\t(".readableBytes($objectIDsSize).")";
+  if ($S3_removed[0]+$S3_updated[0]+$S3_skipped[0]+count($objectIDs) - count($objects) != 0 ) {
+    echo "\n\nERROR: The numbers do not add up!?\n\n";
+    die;
+  }
+}
+
#########################################################################################
# Phase: walk all of oc_filecache and upload every file that was not already found on
# S3 (i.e. whose fileid was not collected in $objectIDs during the bucket scan).
# Rows whose local file has vanished are purged from oc_filecache (real runs only).
echo "\n".
     "\n#########################################################################################".
     "\ncheck files in oc_filecache... ";

# All non-directory entries of every storage, joined so the on-disk path can be derived.
if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC`.`storage_mtime`, `FC`.`size` FROM".
                             " `oc_filecache` AS `FC`,".
                             " `oc_storages`  AS `ST`,".
                             " `oc_mimetypes` AS `MT`".
                             " WHERE 1".
#                              " AND fc.size      != 0".
#                              " AND st.id LIKE 'object::%'".
#                              " AND fc.fileid = '".substr($object['Key'],8)."'". # should be only one..

                              " AND `ST`.`numeric_id` = `FC`.`storage`".
                              " AND `FC`.`mimetype`   = `MT`.`id`".
                              " AND `MT`.`mimetype`  != 'httpd/unix-directory'".
                             " ORDER BY `ST`.`id`, `FC`.`fileid` ASC")) {
  echo "\nERROR: query pos 3\n\n";
  die;
} else {
  // Init progress
  $complete = $result->num_rows;
  $prev     = '';
  $current  = 0;

  echo "\nNumber of objects in oc_filecache: ".$result->num_rows.' ';

  $showinfo = !empty($TEST);
  # NOTE(review): the next line unconditionally disables the verbose output again;
  # presumably a debugging leftover - confirm before relying on $showinfo here.
  $showinfo = 0;

  $LOCAL_ADDED = [0,0]; // [count, bytes] uploaded to S3 in this phase
  while ($row = $result->fetch_assoc()) {
    $current++;

    if (empty($objectIDs[ $row['fileid'] ]) ) {
      // Determine correct path
      if (substr($row['id'], 0, 13) == 'object::user:') {
        $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 13) . DIRECTORY_SEPARATOR . $row['path'];
      }
      else if (substr($row['id'], 0, 6) == 'home::') {
        $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 6) . DIRECTORY_SEPARATOR . $row['path'];
      } else {
        $path = $PATH_DATA . DIRECTORY_SEPARATOR . $row['path'];
      }
      # the first path component below $PATH_DATA is the user the file belongs to
      $user = substr($path, strlen($PATH_DATA. DIRECTORY_SEPARATOR));
      $user = substr($user,0,strpos($user,DIRECTORY_SEPARATOR));
      $users[ $user ] = 1;

      if ($showinfo) { echo "\n".$user."\t".$row['fileid']."\t".$path."\t"; }

      # just for one user? set test = appdata_oczvcie795w3 (system will not go to maintenance nor change database, just test and copy data!!)
      if (is_numeric($TEST) || $TEST == $user ) {
        if(file_exists($path) && is_file($path)){
          if (!empty($TEST) && $TEST == 2) {
            echo ' not uploaded ($TEST = 2)';
          } else {
            $result_s3 = S3put($s3, $bucket,[
                                      'Key' => 'urn:oid:'.$row['fileid'],
                                      #'Body'=> "Hello World!!",
                                      'SourceFile' => $path,
                                      'ACL' => 'private'//public-read'
                                    ]);
            # abort the whole migration on the first failed upload
            if (strpos(' '.$result_s3,'ERROR:') == 1) {
              echo "\n".$result_s3."\n\n";
              die;
            }
            if ($showinfo) { echo "OK"; }
          }
          $LOCAL_ADDED[0]++;
          $LOCAL_ADDED[1]+=$row['size'];
        } else {
          # stale oc_filecache row: the file is neither on S3 nor on disk
          echo "\n".$path." (id:".$row['fileid'].") DOES NOT EXIST?!\n";
          if (empty($TEST)) {
            $mysqli->query("DELETE FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$row['fileid']);
            echo "\t".'removed ($TEST = 0)'."\n";
          } else {
            echo "\t".'not removed ($TEST != 0)'."\n";
          }
        }
      } else if ($showinfo) {
        echo "SKIP (\$TEST = $TEST)";
      }
    } else {
      # already present and up to date on S3
      if ($showinfo) { echo "\n"."\t".$row['fileid']."\t".$row['path']."\t"."SKIP";}
    }
    // Update progress (backspace over the previous percentage, pad with spaces)
    $new = sprintf('%.2f',$current/$complete*100).'% (now at user '.$user.')';

    if ($prev != $new && !$showinfo) {
      echo str_repeat(chr(8) , strlen($prev) );
      $new.= (strlen($prev)<=strlen($new))? '' : str_repeat(' ' , strlen($prev)-strlen($new) );
      $prev = $new;
      echo $prev;
    }
  }
  $result->free_result();
  if (!$showinfo) {
    echo str_repeat(chr(8) , strlen($prev) );
    $new = ' DONE ';
    $new.= (strlen($prev)<=strlen($new))? '' : str_repeat(' ' , strlen($prev)-strlen($new) );
    $prev = $new;
    echo $prev;
  }
  echo "\nFiles in oc_filecache added to S3: ".$LOCAL_ADDED[0]."\t(".readableBytes($LOCAL_ADDED[1]).")";
}
echo "\nCopying files finished";
+
# Phase: detect chunked uploads that were started more than 24h ago and never
# finished. EXPERIMENTAL - detection only: the delete branches below are disabled by
# the hard-coded 'if ( 1 )' guards.
echo "\n". # inspiration source: https://github.com/otherguy/nextcloud-cleanup/blob/main/clean.php
     "\n#########################################################################################".
     "\ncheck for canceled uploads in oc_filecache...".
     "\n=> EXPERIMENTAL, I have not had this problem, so can not test.. => check only!";

# Every file whose parent is a child of an 'uploads' folder and whose storage_mtime
# is older than 24 hours (i.e. a stale upload chunk) on an available storage.
if (!$result = $mysqli->query("SELECT `oc_filecache`.`fileid`, `oc_filecache`.`path`, `oc_filecache`.`parent`, `oc_storages`.`id` AS `storage`, `oc_filecache`.`size`".
                             " FROM `oc_filecache`".
                             " LEFT JOIN `oc_storages` ON `oc_storages`.`numeric_id` = `oc_filecache`.`storage`".
                             " WHERE `oc_filecache`.`parent` IN (".
                             "   SELECT `fileid`".
                             "   FROM `oc_filecache`".
                             "   WHERE `parent` IN (SELECT fileid FROM `oc_filecache` WHERE `path`='uploads')".
                             "   AND `storage_mtime` < UNIX_TIMESTAMP(NOW() - 24 * 60 * 60)".
                             " ) AND `oc_storages`.`available` = 1")) {
  echo "\nERROR: query pos 4";
  die;
} else {
  $S3_removed = [0,0]; // [count, bytes] of detected (not actually removed) chunks
  $S3_PARENTS = [];    // distinct parent folder ids of the stale chunks

  while ($row = $result->fetch_assoc()) {
    echo "\nCanceled upload: ".$row['path']." ( ".$row['size']." bytes)";
    $S3_removed[0]++;
    $S3_removed[1]+=$row['size'];
    // Add parent object to array
    $S3_PARENTS[] = $row['parent'];
    if ( 1 ) { # NOTE(review): hard-coded guard keeps this phase read-only
      echo ' EXPERIMENTAL: no deletion, only detection';
    } else
    if (!empty($TEST) && $TEST == 2) {
      echo ' not removed ($TEST = 2)';
    } else {
      $result_s3 =  S3del($s3, $bucket, 'urn:oid:'.$row['fileid']);
      if ($showinfo) { echo 'S3del:'.$result_s3; }
      $mysqli->query("DELETE FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$row['fileid']);
    }
  }
  if ($S3_removed[0] > 0 ) {
    echo "\nobjects removed from  S3: ".$S3_removed[0]."\t(".readableBytes($S3_removed[1]).")";
    // Delete all parent objects from the db
    $S3_PARENTS = array_unique($S3_PARENTS);
    echo "\nremoving parents... (".count($S3_PARENTS)." database entries)";
    foreach ($S3_PARENTS as $s3_parent) {
      echo "\nparent obeject id: ".$s3_parent;
      if ( 1 ) { # NOTE(review): same hard-coded read-only guard as above
        echo ' EXPERIMENTAL: no deletion, only detection';
      } else
      if (!empty($TEST) && $TEST == 2) {
        echo ' not removed ($TEST = 2)';
      } else {
        $mysqli->query("DELETE FROM `oc_filecache` WHERE `oc_filecache`.`fileid` = ".$s3_parent);
        echo ' removed';
      }
    }
  }
}
+
#########################################################################################
# Finalisation: only executed on a real run ($TEST == 0). Rewrites the storage ids in
# the database from local/home storage to the S3 object store, switches the migrated
# users' mount provider, copies the object-store config into place and (optionally)
# turns maintenance mode off again.
if (empty($TEST)) {
  $dashLine = "\n".
              "\n#########################################################################################";

  # home::<uid>  ->  object::user:<uid>  (per-user storages)
  $mysqli->query("UPDATE `oc_storages` SET `id`=CONCAT('object::user:', SUBSTRING_INDEX(`oc_storages`.`id`,':',-1)) WHERE `oc_storages`.`id` LIKE 'home::%'");
  $UpdatesDone = $mysqli->affected_rows;

  //rename command
  if ($LOCAL_STORE_ID == 0
   || $OBJECT_STORE_ID== 0) { // standard rename: no object-storage row existed yet
    $mysqli->query("UPDATE `oc_storages` SET `id`='object::store:amazon::".$bucket."' WHERE `oc_storages`.`id` LIKE 'local::".$PATH_DATA."/'");
    $UpdatesDone.= '/'.$mysqli->affected_rows;
  } else {
    # both rows exist: move the filecache entries over to the object-storage row
    $mysqli->query("UPDATE `oc_filecache` SET `storage` = '".$OBJECT_STORE_ID."' WHERE `storage` = '".$LOCAL_STORE_ID."'");
    $UpdatesDone.= '/'.$mysqli->affected_rows;
    #$mysqli->query("DELETE FROM `oc_storages` WHERE `oc_storages`.`numeric_id` = ".$OBJECT_STORE_ID);
  }
  if ($UpdatesDone == '0/0' ) {
#    echo $dashLine." no modifications needed";
  } else {
    echo $dashLine."\noc_storages altered (".$UpdatesDone.")";
  }

  # switch every migrated user from the local home mount provider to the object one
  foreach ($users as $key => $value) {
    $mysqli->query("UPDATE `oc_mounts` SET `mount_provider_class` = REPLACE(`mount_provider_class`, 'LocalHomeMountProvider', 'ObjectHomeMountProvider') WHERE `user_id` = '".$key."'");
    if ($mysqli->affected_rows == 1) {
      echo $dashLine."\n-Changed mount provider class off ".$key." from home to object";
      $dashLine = '';
    }
  }

  echo "\n".
       "\n#########################################################################################";

  if ($PREVIEW_DEL[1] > 0 ) {
    echo "\nThere were preview images removed";
    echo "\nNOTE: you can optionally run occ preview:generate-all => pre generate previews, do install preview generator)\n";
  }

  # old per-user data folders are no longer used once files live on S3
  foreach ($users as $key => $value) {
    if (is_dir($PATH_DATA . DIRECTORY_SEPARATOR . $key)) {
      echo "\nNOTE: you can remove the user folder of $key\tby: rm -rf ".$PATH_DATA . DIRECTORY_SEPARATOR . $key;
    }
  }
  echo "\n";

  if (is_string($CONFIG_OBJECTSTORE) && file_exists($CONFIG_OBJECTSTORE) ) {
    # BUGFIX: this echo used to end with '.' instead of ';', so copy() was evaluated
    # inside the echo expression and its boolean return value ('1') was printed.
    echo "\nCopy storage.config.php to the config folder...";
    if (!copy($CONFIG_OBJECTSTORE,$PATH_NEXTCLOUD.'/config/storage.config.php')) {
      echo "\nWARNING: copy failed, add the objectstore block to config.php manually!";
    }

    if ($SET_MAINTENANCE) { // maintenance mode
      $process = occ($OCC_BASE,'maintenance:mode --off');
      echo $process;
    }
    echo "\n#########################################################################################".
         "\n".
         "\nALL DONE!".
         "\n".
         "\nLog into your Nextcloud instance and check!".
         "\n".
         "\nIf all looks well: do not forget to remove '/config/storage.config.php' (it should be".
         "\n                   included in your config: having double config data is a risk..)".
         "\nIf it's not OK   : set your instance in 'maintenance:mode --on' & restore your SQL backup".
         "\n                   you'll be back to 'local' (let me know, via GitHub, I'll try to help)".
         "\n#########################################################################################";
  }
  else if ($OBJECT_STORE_ID > 0 ) {
    if ($SET_MAINTENANCE) { // maintenance mode
      $process = occ($OCC_BASE,'maintenance:mode --off');
      echo $process;
    }
    echo "\n#########################################################################################".
         "\n".
         "\nALL DONE!".
         "\n".
         "\nLog into your Nextcloud instance and check!".
         "\n".
         "\n#########################################################################################";
  } else {
    echo "\n#########################################################################################".
         "\n".
         "\nALMOST done, one more step:".
         "\n".
         "\n ====== ! ! THIS MUST BE DONE MANUALY ! ! ======".
         "\n 1: add \$CONFIG_OBJECTSTORE to your config.php".
         "\n 2: turn maintenance mode off".
         "\n".
         "\nThe importance of the order to do this is EXTREME, other order can brick your Nextcloud!!\n".
         "\n".
         "\n#########################################################################################";
  }
  echo "\n\n";

} else {
  echo "\n\ndone testing..\n";
}
+
#########################################################################################
# Run an occ command through the shell and return its captured stdout, prefixed with a
# short header line. $OCC_BASE is the full command prefix (e.g. "sudo -u www-data php
# /path/occ "), $OCC_COMMAND the occ sub-command to append.
function occ($OCC_BASE,$OCC_COMMAND) {
  $header = "\nset  ".$OCC_COMMAND.":\n";

  // Capture everything passthru() writes instead of letting it reach the terminal.
  ob_start();
  passthru($OCC_BASE . $OCC_COMMAND);
  $output = ob_get_clean();

  return $header.$output."\n";
}
+
#########################################################################################
# Return every object in $bucket as an array of 'Contents' entries, following the
# listObjects (v1) pagination via the Marker parameter. On failure a string starting
# with 'ERROR:' is returned instead of an array. $maxIteration caps the number of
# pages fetched; a warning is printed when the listing was still truncated.
function S3list($s3, $bucket, $maxIteration = 10000000) {
  $objects   = [];
  $marker    = '';
  $iteration = 0;
  try {
    do {
      $page = $s3->listObjects(['Bucket' => $bucket, 'Marker' => $marker]);

      // print a progress dot now and then
      if (rand(0,100) > 75 ) { echo '.'; }

      $batch = $page->get('Contents');
      if ($batch) {
        $objects = array_merge($objects, $batch);
      }
      if (count($objects)) {
        // the next page starts after the last key collected so far
        $marker = $objects[count($objects) - 1]['Key'];
      }
    } while ($page->get('IsTruncated') && ++$iteration < $maxIteration);
    if ($page->get('IsTruncated')) {
      echo "\n".'WARNING: The number of keys greater than '.count($objects).' (the first part is loaded)';
    }
    return $objects;
  } catch (S3Exception $e) {
    return 'ERROR: Cannot retrieve objects: '.$e->getMessage();
  }
}
#########################################################################################
# Upload a single file to S3.
# $s3     : Aws\S3\S3Client instance
# $bucket : default bucket used when $vars carries no 'Bucket'
# $vars   : either a file path (string) or a putObject() parameter array; missing
#           'Bucket', 'Key' (defaults to the SourceFile path) and 'ACL' (defaults
#           to 'private') are filled in.
# Returns 'OK: ...' on success or a string starting with 'ERROR:' on failure.
# Files above $GLOBALS['MULTIPART_THRESHOLD'] Mb go through the MultipartUploader.
function S3put($s3, $bucket, $vars = array() ) {
  #return 'dummy';
  if (is_string($vars)      ) {
    if (file_exists($vars)) {
      $vars = array('SourceFile' => $vars);
    }
    else {
      return 'ERROR: S3put($cms, $bucket, $vars)';
    }
  }
  if (empty($vars['Bucket'])     ) { $vars['Bucket'] = $bucket; }
  if (empty($vars['Key'])
   && !empty($vars['SourceFile'])) { $vars['Key'] = $vars['SourceFile']; }
  # BUGFIX: this used to assign to $vars['Key'], silently replacing the object key
  # with the literal string 'private' whenever no ACL was passed (the copy of this
  # helper in s3_test.php already had it right).
  if (empty($vars['ACL'])        ) { $vars['ACL'] = 'private'; }

  if (empty($vars['Bucket'])           ) { return 'ERROR: no Bucket'; }
  if (empty($vars['Key'])              ) { return 'ERROR: no Key'; }
  if (empty($vars['SourceFile'])
   || !file_exists($vars['SourceFile'])) { return 'ERROR: file \''.$vars['SourceFile'].'\' does not exist'; }

  try {
    if (isset($GLOBALS['MULTIPART_THRESHOLD'])
     && filesize($vars['SourceFile']) > $GLOBALS['MULTIPART_THRESHOLD']*1024*1024) {
        $uploader = new MultipartUploader($s3,
                                          $vars['SourceFile'],
                                          $vars);
        $result = $uploader->upload();
    } else {
      if (filesize($vars['SourceFile']) > 2*1024*1024*1024) {
        echo "\n".'WARNING: file \''.$vars['SourceFile'].'\' is larger then 2 Gb, consider enabeling \'MultipartUploader\'';
      }
      $result = $s3->putObject($vars);
    }
    if (!empty($result['ObjectURL'])) {
      return 'OK: '.'ObjectURL:'.$result['ObjectURL'];
    } else {
      return 'ERROR: '.$vars['Key'].' was not uploaded';
    }
  } catch (MultipartUploadException | S3Exception | Exception $e) {
    return 'ERROR: ' . $e->getMessage();
  }
}
#########################################################################################
# Delete one object from S3.
# $vars is either the object key (string) or a deleteObject() parameter array;
# 'Bucket' defaults to $bucket. Returns an 'OK: ...' string (deleteObject also
# succeeds for a missing key) or 'ERROR: ...'.
function S3del($s3, $bucket, $vars = array() ) {
  #return 'dummy';
  if (is_string($vars)      ) { $vars = array('Key' => $vars); }
  if (empty($vars['Bucket'])) { $vars['Bucket'] = $bucket; }

  if (empty($vars['Bucket'])) { return 'ERROR: no Bucket'; }
  if (empty($vars['Key'])   ) { return 'ERROR: no Key';    }

  try {
    $result = $s3->deleteObject($vars);
    # BUGFIX: the message used to read "didn't not exist" (double negative)
    return 'OK: '.$vars['Key'].' was deleted (or didn\'t exist)';
  } catch (S3Exception $e) { return 'ERROR: ' . $e->getMessage(); }
}
#########################################################################################
# Download one object from S3 to a local file.
# $vars is either the object key (string) or a getObject() parameter array; 'Bucket'
# defaults to $bucket, 'Key' and 'SaveAs' default to each other when only one is set.
# Returns the getObject() result on success or an 'ERROR: ...' string on failure.
function S3get($s3, $bucket, $vars = array() ) {
  #return 'dummy';
  if (is_string($vars)      ) {
    $vars = array('Key' => $vars);
  }
  if (empty($vars['Bucket']) ) { $vars['Bucket'] = $bucket; } // Bucket = the bucket
  if (empty($vars['Key'])
   && !empty($vars['SaveAs'])) { $vars['Key']    = $vars['SaveAs']; } // Key = the file-id/location in s3
  if (empty($vars['SaveAs'])
   && !empty($vars['Key'])   ) { $vars['SaveAs'] = $vars['Key']; } // SaveAs = local location+name

  if (empty($vars['Bucket'])) { return 'ERROR: no Bucket'; }
  if (empty($vars['Key'])   ) { return 'ERROR: no Key';    }

  try {
    # BUGFIX: this used to call $cms['aws']['client']->getObject() - $cms is undefined
    # in this function, so any reachable call was a fatal error; use the $s3 parameter.
    # The dead 'if (1 || doesObjectExist(...))' guard around it has been removed.
    return $s3->getObject($vars);
  } catch (S3Exception $e) { return 'ERROR: ' . $e->getMessage(); }
}
+
#########################################################################################
# Format a byte count as a short human readable string (e.g. 1536 -> "1.5 kb").
# $bytes: byte count. Zero and negative values are returned as plain "<n> bytes" -
# oc_filecache can carry negative sizes (e.g. -1 for "unknown"), which would otherwise
# feed log() a non-positive number (NAN) below.
function readableBytes($bytes) {
  if ($bytes <= 0) { return $bytes . " bytes"; }
  $i = floor(log($bytes) / log(1024));
  $sizes = array('bytes', 'kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb', 'Yb');
  # the '* 1' trick casts the formatted string back to a number, dropping trailing zeros
  return sprintf('% 5.2f', $bytes / pow(1024, $i)) * 1 . ' ' . $sizes[$i];
  #return sprintf('%.02F', $bytes / pow(1024, $i)) * 1 . ' ' . $sizes[$i];
}

+ 144 - 0
dev/tools/nextcloud-S3-migration/s3_test.php

@@ -0,0 +1,144 @@
<?php
# Standalone test script: connect to the S3 endpoint described in storage.config.php,
# list the buckets and dump the contents of the Nextcloud bucket.

use Aws\S3\S3Client;
use Aws\S3\Exception\S3Exception; # BUGFIX: without this import 'catch (S3Exception ...)' can never match

require_once(dirname(__FILE__).'/vendor/autoload.php');

# Including the config file replaces $CONFIG (the path) with the config array it declares.
$CONFIG = dirname(__FILE__).'/storage.config.php';

include($CONFIG);

echo "\nconnect to S3...\n";
$bucket_name = $CONFIG['objectstore']['arguments']['bucket'];
$s3 = new S3Client([
    'version' => 'latest',
    'endpoint' => 'http://'.$CONFIG['objectstore']['arguments']['hostname'],
    'region'  => $CONFIG['objectstore']['arguments']['region'],
    'credentials' => [
        'key' => $CONFIG['objectstore']['arguments']['key'],
        'secret' => $CONFIG['objectstore']['arguments']['secret'],
    ],
    'use_path_style_endpoint' => $CONFIG['objectstore']['arguments']['use_path_style']
]);

$buckets = $s3->listBuckets([
]);
# BUGFIX: 'echo $buckets' was a fatal error (Aws\Result has no __toString); dump it instead
print_r($buckets->toArray());
echo "\n";
try {
    foreach ($buckets['Buckets'] as $bucket){
        echo "{$bucket['Name']}\t{$bucket['CreationDate']}\n";
    }
} catch (S3Exception $e) {
    echo $e->getMessage();
    echo "\n";
}

echo "\n".
     "\nread files in S3...\n";
echo "The contents of your bucket $bucket_name are: \n\n";

$objects = S3list($s3, $bucket_name);

//$objects = $s3->listObjectsV2([
//       'Bucket' => $bucket_name,
//]);
//foreach ($objects['Contents'] as $object){
//    echo "{$object['Key']}\t{$object['LastModified']}\n";
//}
//echo "\n";

# BUGFIX: print_r() without the return flag prints immediately and returns true,
# so the old code printed the dump followed by 'S3list:1'; request it as a string.
echo 'S3list:'.print_r($objects, true);
echo "\n";

/*
$result_s3 =  S3put($s3, $bucket, [
                          'SourceFile' => './nextcloud_25.tar.gz',
                        ]);
echo 'S3put:'.$result_s3;

$bucket = 'nextcloud';
$file_Path = './nextcloud_25.tar.gz';
$key = basename($file_Path);
try{
    $result = $s3->putObject([
        'Bucket'     => $bucket,
        'Key'        => $key,
        'SourceFile' => $file_Path,
        'ACL'        => 'private',
    ]);
} catch (S3Exception $e) {
    echo $e->getMessage() . "\n";
}
*/
+
//#########################################################################################
// Return every object in $bucket as an array of 'Contents' entries, paging through
// listObjectsV2 results. Returns an 'ERROR: ...' string on failure. $maxIteration
// caps the number of pages fetched.
function S3list($s3, $bucket, $maxIteration = 10000000) {
  $objects = [];
  try {
    $iteration = 0;
    $marker = '';
    do {
      // BUGFIX: the marker was computed below but never sent with the request, so a
      // truncated listing fetched the same first page until $maxIteration; pass the
      // last seen key as 'StartAfter' (the listObjectsV2 equivalent of 'Marker').
      $result = $s3->listObjectsV2(['Bucket' => $bucket, 'StartAfter' => $marker]);

      if (rand(0,100) > 75 ) { echo '.'; }

      if ($result->get('Contents')) {
        $objects = array_merge($objects, $result->get('Contents'));
      }
      if (count($objects)) {
        $marker = $objects[count($objects) - 1]['Key'];
      }
    } while ($result->get('IsTruncated') && ++$iteration < $maxIteration);
    if ($result->get('IsTruncated')) {
      echo "\n".'WARNING: The number of keys greater than '.count($objects).' (the first part is loaded)';
    }
    return $objects;
  } catch (S3Exception $e) {
    return 'ERROR: Cannot retrieve objects: '.$e->getMessage();
  }
}
//#########################################################################################
// Upload a single file to S3 (test-script copy of the migration helper; it also dumps
// the request parameters via print_r for debugging). $vars is either a file path
// (string) or a putObject() parameter array; Bucket/Key/ACL get defaults when missing.
// Returns 'OK: ...' on success or a string starting with 'ERROR:' on failure.
function S3put($s3, $bucket, $vars = array() ) {
  #return 'dummy';
  // A plain string is treated as the path of the file to upload.
  if (is_string($vars)) {
    if (!file_exists($vars)) {
      return 'ERROR: S3put($cms, $bucket, $vars)';
    }
    $vars = array('SourceFile' => $vars);
  }

  // Fill in the defaults.
  if (empty($vars['Bucket'])) { $vars['Bucket'] = $bucket; }
  if (empty($vars['Key']) && !empty($vars['SourceFile'])) {
    $vars['Key'] = $vars['SourceFile'];
  }
  if (empty($vars['ACL'])) { $vars['ACL'] = 'private'; }

  // Validate the request before touching the network.
  if (empty($vars['Bucket'])) { return 'ERROR: no Bucket'; }
  if (empty($vars['Key'])) { return 'ERROR: no Key'; }
  if (!file_exists($vars['SourceFile'])) {
    return 'ERROR: file \''.$vars['SourceFile'].'\' does not exist';
  }

  try {
    $useMultipart = isset($GLOBALS['MULTIPART_THRESHOLD'])
                 && filesize($vars['SourceFile']) > $GLOBALS['MULTIPART_THRESHOLD']*1024*1024;
    if ($useMultipart) {
      $uploader = new MultipartUploader($s3, $vars['SourceFile'], $vars);
      $result = $uploader->upload();
    } else {
      if (filesize($vars['SourceFile']) > 2*1024*1024*1024) {
        echo "\n".'WARNING: file \''.$vars['SourceFile'].'\' is larger then 2 Gb, consider enabeling \'MultipartUploader\'';
      }
      print_r($vars);
      $result = $s3->putObject($vars);
    }
    return !empty($result['ObjectURL'])
         ? 'OK: '.'ObjectURL:'.$result['ObjectURL']
         : 'ERROR: '.$vars['Key'].' was not uploaded';
  } catch (MultipartUploadException | S3Exception | Exception $e) {
    return 'ERROR: ' . $e->getMessage();
  }
}

+ 17 - 0
dev/tools/nextcloud-S3-migration/storage.config.php

@@ -0,0 +1,17 @@
<?php
# Nextcloud primary object-store configuration (copied to config/storage.config.php
# by the migration script and merged into config.php by Nextcloud).
# NOTE(review): key/secret below look like credentials for the local dev/Vagrant S3
# box - confirm, and never commit real production credentials to the repository.
$CONFIG = array (
  'objectstore' => array(
          'class' => 'OC\\Files\\ObjectStore\\S3',
          'arguments' => array(
                  'bucket' => 'nextcloud', 
                  'autocreate' => true,
                  'key' => 'Z3Z9QSQ315XANF0ZUXQS', 
                  'secret' => 'IKJCPixZWj7i2JxvhMFnSYCXSgS4qZ7kC9DigfmX', 
                  'hostname' => '192.168.56.71', 
                  'port' => 80,
                  'use_ssl' => false,
                  'region' => 'eu-west-3', 
                  'use_path_style' => true
          ),
  ),
);

+ 0 - 0
report/compile.sh