Browse Source

Apply patches for OpenStack env

Pierre-Yves Barriat 1 year ago
parent
commit
54f94d1454

+ 46 - 30
dev/Migration.md

@@ -1,34 +1,47 @@
 # Migration Nextcloud Pelican --> Nextcloud test
 
 1. Upgrade nextcloud on Pelican
-2. Deploy a new Nextcloud instance with Vagrant (same NC release + MariaDB **10.6.12**)
-  > minimum 80GB for DB host, minimum 1.5T for NC host
+2. Deploy a new Nextcloud instance (FS) with OpenStack (same NC release + MariaDB **10.5.11**)
+  > minimum 100GB for DB host, storepelican available with NFS
+  > add `'proxy' => 'proxy.sipr.ucl.ac.be:889',` in the NC config
 3. Halt NC on Pelican and 
   - rsync the mysql folder from DB container
-  - rsync the Nextcloud folder from NC container
-  - rsync the NC data folder from storepelican
-4. Halt NC on Vagrant and copy the 3 folders above in the corresponding Vagrant hosts
-5. Check the parameters: merge from old to new instance
-  - NC config file 
-  - Selinux paths
-  - etc
-6. Start the new instance and check it
+  - rsync the Nextcloud data folder from storepelican to a test folder (still on storepelican)
+4. Halt mariadb in the DB VM and rsync the mysql folder above
+  > check ownership (mysql:mysql) on the folder before starting mariadb
+5. Halt apache in the NC VM and update the NC config
+  - overwrite 'passwordsalt' and 'secret' in the config from pelican
+  - add `putenv('LDAPTLS_CACERT=/etc/ssl/certs/cacism3.pem');`
+    > check the existence of `/etc/ssl/certs/cacism3.pem` on the NC VM
+  - overwrite 'datadirectory'
+    > check ownership (apache:apache) on the full datadirectory folder before starting apache
+6. Check the instance
+  - `sudo -u apache php occ status`
+  - `sudo -u apache php occ ldap:show-config`
+  - `sudo -u apache php occ ldap:test-config ""`
+  - go to "https://nextcloud.test.cism.ucl.ac.be" as admin, then as a user
 
 # Make Nextcloud lighter and cleaner
 
-1. Halt NC host
-2. Nextcloud data relief
+1. Nextcloud data relief
   - clean the files
-    > `occ files:cleanup`
-  - remove the cache in the DB (unofficial)
-    > oc_filecache (> 54789 MB)
-    > `occ files:scan -vvv --all --home-only`
-  - remove remnants users
-    > `occ ldap:show-remnants`
+    > `sudo -u apache php occ files:cleanup`
+  - remove remnant users with `01_delete_remnants.sh`
+    > `sudo -u apache php occ ldap:show-remnants`
+  - clean trash
+    > `sudo -u apache php occ trashbin:clean --all-users`
+  - remove the external mounts with `02_delete_all_external.sh`
+    > takes a while (~3h)
+    > `sudo -u apache php occ files:scan -vvv --all`
   - remove the guests users ?
-    > `occ user:list | grep "Guest"`
-  - remove the external mounts
+    > `sudo -u apache php occ user:list | grep "Guest"`
+  - remove the old users (*impossible because of the current LDAP*)
+    > based on CISM LDAP status (inactive account since 2 years)
+2. Nextcloud data relief
   - remove the shared files and folders
+  - remove the cache in the DB (unofficial)
+    > oc_filecache (> 54789 MB)
+    > `sudo -u apache php occ files:scan -vvv --all --home-only`
 3. Start the instance and check it
 4. Halt NC host and:
   - rsync the mysql folder from DB host
@@ -37,16 +50,19 @@
 
 # Migration files --> S3
 
-1. Deploy a new Nextcloud instance with Vagrant (same NC release + MariaDB 10.6.12 + **CEPH 3 nodes of 2TB**)
-  > minimum 80GB for DB host, minimum 60GB for NC, minimum 1.3T for NC Data
-2. Halt NC on Vagrant host and 
+1. Disable cron:  `mv /etc/cron.d/nextcloud /var/www/html/nextcloud/etc_cron.d_nextcloud`
+2. Apply `nextcloud-S3-local-S3-migration`
+3. Check the instance
+4. Enable cron
+
+> !!! S3 migration: external mounts lost & shared files lost !!!
+
+# Nextcloud improved + S3
+
+1. Deploy a new Nextcloud instance (S3) with OpenStack (same NC release + MariaDB **10.5.11**)
+  > minimum 200GB for DB host, minimum 200GB for NC
+2. Halt NC on Vagrant host and
   - rsync the mysql folder
-  - rsync the Nextcloud folder 
+  - rsync the Nextcloud folder
   - rsync the NC data folder
 3. Start the new instance and check it
-4. Disable cron:  `mv /etc/cron.d/nextcloud /var/www/html/nextcloud/etc_cron.d_nextcloud`
-5. Apply `nextcloud-S3-local-S3-migration`
-6. Check the instance
-7. Enable cron
-
-> !!! S3 migration: external mounts lost & shared files lost !!!

+ 2 - 2
dev/README.md

@@ -210,7 +210,7 @@ HOSTS = [
 Examples:
 
 ```bash
-ansible -v -i '192.168.64.68,' --key-file /home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro -u rocky -b -m setup all
+ansible -v -i '192.168.64.68,' --key-file /home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro -u pedro -b -m setup all
 
-ansible-playbook -v -i provisioning/ansible/hosts_openstack -b provisioning/ansible/nextcloud_cism.yml
+ansible-playbook -v -i provisioning/ansible/hosts_openstack -b provisioning/ansible/playbook_openstack.yml
 ```

+ 4 - 4
dev/Vagrantfile

@@ -38,7 +38,7 @@ HOSTS = [
 
 HOSTS = [
   #{ :hostname => "db1",         :ip => NETWORK+"11",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers",     :synced_folder => "/backup/pelican/backup"},
-  { :hostname => "db1",         :ip => NETWORK+"11",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
+  { :hostname => "db1",         :ip => NETWORK+"11",  :ram => 3072,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
   #{ :hostname => "db2",         :ip => NETWORK+"12",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
   #{ :hostname => "db3",         :ip => NETWORK+"13",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_servers"       },
   #{ :hostname => "lbsql1",      :ip => NETWORK+"19",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "db_lbal_servers", :state => "MASTER",  :priority => 101, :vip => NETWORK+"20" },
@@ -48,9 +48,9 @@ HOSTS = [
   #{ :hostname => "ceph1",       :ip => NETWORK+"71",  :ram => 3072,  :cpu => 2,  :box => "ubuntu/focal64", :group => "ceph_servers",    :disk_extra => "600GB",  :disk_name => "ceph_storage_extra" },
   #{ :hostname => "ceph2",       :ip => NETWORK+"72",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers",    :disk_extra => "600GB",  :disk_name => "ceph_storage" },
   #{ :hostname => "ceph3",       :ip => NETWORK+"73",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers",    :disk_extra => "600GB",  :disk_name => "ceph_storage" },
-  { :hostname => "ceph1",       :ip => NETWORK+"71",  :ram => 3072,  :cpu => 2,  :box => "ubuntu/focal64", :group => "ceph_servers"     },
-  { :hostname => "ceph2",       :ip => NETWORK+"72",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers"     },
-  { :hostname => "ceph3",       :ip => NETWORK+"73",  :ram => 2048,  :cpu => 1,  :box => "ubuntu/focal64", :group => "ceph_servers"     },
+  { :hostname => "ceph1",       :ip => NETWORK+"71",  :ram => 3072,  :cpu => 2,  :box => "rockylinux/8", :group => "ceph_servers"     },
+  { :hostname => "ceph2",       :ip => NETWORK+"72",  :ram => 3072,  :cpu => 2,  :box => "rockylinux/8", :group => "ceph_servers"     },
+  { :hostname => "ceph3",       :ip => NETWORK+"73",  :ram => 3072,  :cpu => 2,  :box => "rockylinux/8", :group => "ceph_servers"     },
   #{ :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 2048,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :synced_folder => "/backup/pelican"},
   { :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 3072,  :cpu => 2,  :box => "rockylinux/8",   :group => "web_servers"      },
   #{ :hostname => "web.test",    :ip => NETWORK+"41",  :ram => 1024,  :cpu => 1,  :box => "rockylinux/8",   :group => "web_servers",     :ipdb => NETWORK+"20", :redisd => "keydb", :redisp => "6380", :redisv => NETWORK+"40", :priority => 101 },

+ 2 - 7
dev/provisioning/ansible/database.yml

@@ -5,11 +5,6 @@
   hosts: db_servers
   vars:
     ansible_python_interpreter: /usr/bin/python3
-  pre_tasks:
-    - name: define ansible_python_interpreter group // linux distribution
-      set_fact:
-        ansible_python_interpreter: /usr/bin/python2
-      when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
   roles:
     - role: mariadb
       vars:
@@ -17,5 +12,5 @@
         #mariadb_sync: true
         #bootstrap_galera: true
         #databases_users_check: true
-        mariadb_datadir: "/extent/mysql"
-        mariadb_socket: "/extent/mysql/mysql.sock"
+        #mariadb_datadir: "/storage/mysql"
+        #mariadb_socket: "/storage/mysql/mysql.sock"

+ 22 - 0
dev/provisioning/ansible/database_openstack.yml

@@ -0,0 +1,22 @@
+---
+- name: apply database configuration
+  collections:
+    - community.mysql
+  hosts: db_servers
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  pre_tasks:
+    - name: install stuffs to be integrated later in salt
+      dnf:
+        name: ['rsync', 'libaio', 'libpmem', 'lsof']
+        state: present
+      when: ansible_distribution == 'Rocky'
+  roles:
+    - role: mariadb
+      vars:
+        mariadb_version: "10.5.11"
+        #mariadb_sync: true
+        #bootstrap_galera: true
+        #databases_users_check: true
+        mariadb_datadir: "/storage/mysql"
+        mariadb_socket: "/storage/mysql/mysql.sock"

+ 13 - 3
dev/provisioning/ansible/hosts_openstack

@@ -1,7 +1,17 @@
-nc-web-1.cism.ucl.ac.be ansible_host=192.168.64.68 ansible_user=rocky ansible_ssh_private_key_file=/home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro
+nc-db ansible_host=192.168.64.110 ansible_user=pedro ansible_ssh_private_key_file=/home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro
+nc-web1.cism.ucl.ac.be ansible_host=192.168.64.68 ansible_user=pedro ansible_ssh_private_key_file=/home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro
+nc-lb ansible_host=192.168.64.71 ansible_user=pedro ansible_ssh_private_key_file=/home/nextcloud/Documents/Secure/Unix/ssh/id_rsa_pedro
 
 [all]
-nc-web-1.cism.ucl.ac.be network_allowed=192.168.64.0/24
+nc-db network_allowed=192.168.64.0/24
+nc-web1.cism.ucl.ac.be network_allowed=192.168.64.0/24
+nc-lb network_allowed=192.168.64.0/24
+
+[db_servers]
+nc-db network_allowed=192.168.64.0/24
 
 [web_servers]
-nc-web-1.cism.ucl.ac.be network_allowed=192.168.64.0/24 nc_global_name=nextcloud.test
+nc-web1.cism.ucl.ac.be network_allowed=192.168.64.0/24 nc_global_name=nextcloud.test.cism.ucl.ac.be redis_daemon=keydb redis_port=6380
+
+[lbal_servers]
+nc-lb network_allowed=192.168.64.0/24 ssl_name=nextcloud.test.cism.ucl.ac.be

+ 3 - 8
dev/provisioning/ansible/nextcloud.yml

@@ -8,16 +8,11 @@
     ansible_python_interpreter: /usr/bin/python3
     redis_daemon: "redis"
     redis_port: "6379"
-  pre_tasks:
-    - name: define ansible_python_interpreter group // linux distribution
-      set_fact:
-        ansible_python_interpreter: /usr/bin/python2
-      when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
   roles:
     - role: redis
       when: redis_daemon == "redis" or redis_daemon == "keydb"
-    - role: keepalived
-      when: (groups['web_servers'] | length) > 1
+        #- role: keepalived
+        #  when: (groups['web_servers'] | length) > 1
         #- role: haproxy
         #  vars:
         #    #hatarget: "keydb"
@@ -31,7 +26,7 @@
     - role: nextcloud
       vars:
         ansible_become_pass: ""
-        NEXTCLOUD_VERSION: "25.0.5"
+        #NEXTCLOUD_VERSION: "27.0.2"
         nc_data_dir: /extent/nextcloud/data
         objectstore_s3_install: false
         #objectstore_s3_key: "229ZJOPCR6JHHU4HIP69"

+ 0 - 34
dev/provisioning/ansible/nextcloud_cism.yml

@@ -1,34 +0,0 @@
----
-- name: apply nextcloud role
-  collections:
-    - community.general
-    - ansible.posix
-  hosts: web_servers
-  vars:
-    ansible_python_interpreter: /usr/bin/python3
-  pre_tasks:
-    - name: install stuffs to be integrated later in salt
-      dnf:
-        name: ['epel-release', 'policycoreutils-python-utils', 'python3-cryptography', 'libselinux-python3']
-        state: present
-      when: ansible_os_family == 'Rocky'
-    - name: add hostname in hosts
-      blockinfile:
-        dest: /etc/hosts
-        #content: '{{ ansible_host }}	{{ nc_global_name }}'
-        content: '{{ ansible_host }} {{ ansible_fqdn }}'
-        state: present
-  roles:
-    #- role: mariadb
-    #  vars:
-    #    mariadb_datadir: "/storage/nextcloud/mysql"
-    #- role: web_php
-    - role: nextcloud
-      vars:
-        NEXTCLOUD_VERSION: "25.0.6"
-        nc_data_dir: /storage/nextcloud/data
-        objectstore_s3_install: false
-  environment:
-    http_proxy: "http://proxy.sipr.ucl.ac.be:889"
-    https_proxy: "http://proxy.sipr.ucl.ac.be:889"
-    no_proxy: "127.0.0.1, localhost, 192.168.64.68, 192.168.64.73, 192.168.64.51, nextcloud.test, ceph.cism.ucl.ac.be, 192.168.64.68"

+ 27 - 0
dev/provisioning/ansible/nextcloud_openstack.yml

@@ -0,0 +1,27 @@
+---
+- name: apply nextcloud role
+  collections:
+    - community.general
+    - ansible.posix
+  hosts: web_servers
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+      
+  roles:
+    - role: redis
+      when: redis_daemon == "redis" or redis_daemon == "keydb"
+    - role: web_php
+    - role: nextcloud
+      vars:
+        #ansible_become_pass: ""
+        NEXTCLOUD_VERSION: "25.0.12"
+        nc_data_dir: /storage/nextcloud/data
+        objectstore_s3_install: false
+        #objectstore_s3_key: "229ZJOPCR6JHHU4HIP69"
+        #objectstore_s3_secret: "xCnx98XN39fDKh3ACGQ9XuDJEOw7PRBxyFW4KjRs"
+        #objectstore_s3_hostname: "192.168.56.71"
+  
+  environment:
+    #http_proxy: http://proxy.sipr.ucl.ac.be:889
+    https_proxy: http://proxy.sipr.ucl.ac.be:889
+    #no_proxy: "127.0.0.1, localhost, 192.168.64.68, 192.168.64.73, 192.168.64.71, nextcloud.test.cism.ucl.ac.be, ceph.cism.ucl.ac.be, 192.168.64.110"

+ 1 - 1
dev/provisioning/ansible/playbook.yml

@@ -6,7 +6,7 @@
 - import_playbook: nextcloud.yml
 - import_playbook: loadbalancer.yml
 
-- import_playbook: essai.yml
+  #- import_playbook: essai.yml
 
   #- import_playbook: node_exporter.yml
   #- import_playbook: prometheus.yml

+ 8 - 0
dev/provisioning/ansible/playbook_openstack.yml

@@ -0,0 +1,8 @@
+---
+- import_playbook: requirements_openstack.yml
+- import_playbook: database_openstack.yml
+- import_playbook: nextcloud_openstack.yml
+- import_playbook: loadbalancer.yml
+  #- import_playbook: node_exporter.yml
+  #- import_playbook: prometheus.yml
+  #- import_playbook: grafana.yml

+ 19 - 0
dev/provisioning/ansible/requirements_openstack.yml

@@ -0,0 +1,19 @@
+---
+- name: apply nextcloud role
+  collections:
+    - community.general
+    - ansible.posix
+  hosts: all
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  pre_tasks:
+    - name: install stuffs to be integrated later in salt
+      dnf:
+        name: ['epel-release', 'policycoreutils-python-utils', 'python3-cryptography', 'libselinux-python3', 'bzip2']
+        state: present
+      when: ansible_distribution == 'Rocky'
+    - name: add hostname in hosts
+      blockinfile:
+        dest: /etc/hosts
+        content: '{{ ansible_host }} {{ ansible_fqdn }}'
+        state: present

+ 1 - 1
dev/provisioning/ansible/roles

@@ -1 +1 @@
-Subproject commit 9d6aeba1a715e29fb9a9cfcf9a9603244ed2f8f9
+Subproject commit fadbb50f117d44b11b7a8525058e67d4b79c4efb

+ 102 - 0
report/2023_10_04.md

@@ -0,0 +1,102 @@
+---
+marp: true
+title: Projet de premier brevet
+author: P.Y. Barriat
+description: Brevet état d'avancement
+backgroundImage: url('assets/back.png')
+_backgroundImage: url('assets/garde.png')
+footer: 04/10/2023 | Brevet - état d'avancement | PY Barriat
+_footer: ""
+paginate: true
+_paginate: false
+math: true
+---
+
+Brevet : état d'avancement<!--fit-->
+===
+
+![h:250](assets/nextcloud.png)
+
+**Pierre-Yves Barriat**
+Projet de premier brevet `October 4th, 2023`
+
+---
+
+# Planification initiale
+
+- Phase 1 : Initialisation du projet (mai 2022)
+- Phase 2 : Analyse et conception (juillet 2022)
+- Phase 3 : Développement (novembre 2022)
+
+- Phase 4 : Test (*décembre 2022*)
+- Phase 5 : Documentation et présentation (*mars 2023*)
+
+Phase de développement (locale) terminée fin août 2023
+
+Début des tests dans l'environnement OpenStack du CISM en septembre
+
+---
+
+### Développement Ansible
+
+> https://gogs.elic.ucl.ac.be/pbarriat/Brevet/src/master/dev
+
+- Stockage : ceph, glusterfs, nfs
+- Monitoring : grafana, node_exporter, prometheus
+- DB : mariadb, proxysql, galera
+- Outils : haproxy, keepalived
+- Caching: Redis, keydb
+- Serveur Web: apache, nginx, php
+- Nextcloud
+
+### Développement Nextcloud
+
+- Script de migration FS / S3 en PHP
+
+---
+
+<!-- Environnement de développement avancé -->
+
+![h:850](./assets/dia_nc_dev_improved.png)
+
+---
+
+<!-- Environnement de test pour migration -->
+
+![h:850](./assets/dia_nc_test_migration.png)
+
+---
+
+## Test : migration
+
+https://gogs.elic.ucl.ac.be/pbarriat/Brevet/src/master/dev/Migration.md
+
+## Test : intégration
+
+- montages externes
+- partages
+- apps
+- LDAP (? nouveau field pour NC ?)
+- shibboleth authentication ?
+
+## Test : performance
+
+---
+
+<!-- Environnement de test avancé -->
+
+![h:850](./assets/dia_nc_test_improved.png)
+
+---
+
+# Estimation
+
+- Phase 4 : Tests
+
+  3 mois (décembre 2023)
+
+- Phase 5 : **fin en mars 2024**
+
+  Déploiement prod
+  Documentation
+  Présentation 

BIN
report/assets/dia_nc_test_improved.png


BIN
report/assets/dia_nc_test_migration.png


BIN
report/assets/logo_nextcloud.png


BIN
report/assets/nextcloud.png


+ 3 - 0
report/compile_3.sh

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+marp --template bespoke --bespoke.progress --allow-local-files --theme ./assets/tum.css 2023_10_04.md -o Meeting_2023_10_04.html

+ 0 - 68
report/scripts/dia_nc_dev_tuned.py

@@ -1,68 +0,0 @@
-from diagrams import Cluster, Diagram, Edge
-
-from diagrams.azure.database import DatabaseForMariadbServers
-
-from diagrams.onprem.inmemory import Redis
-from diagrams.onprem.network import Internet
-from diagrams.onprem.groupware import Nextcloud
-from diagrams.onprem.monitoring import Grafana, Prometheus
-from diagrams.onprem.storage import Glusterfs
-from diagrams.onprem.network import Haproxy
-from diagrams.onprem.database import Mariadb
-
-from diagrams.custom import Custom
-
-with Diagram(filename="./assets/dia_nc_dev_improved", show=True, direction="TB"):
-    out = Internet("Intranet")    
-
-    with Cluster("Web & KeyDB Load Balancing"):
-        vm_web_lb = Custom("", "../assets/vagrant.png")
-        with Cluster("Node 1"):
-            ha_2 = Haproxy("Master")
-        with Cluster("Node 2"):
-            ha_1 = Haproxy("Backup")    
-
-    with Cluster("Shared persistent storage"):
-        vm_nfs = Custom("", "../assets/vagrant.png")
-        with Cluster("Node 2"):
-            nfs_2 = Glusterfs()
-        with Cluster("Node 1"):
-            nfs_1 = Glusterfs()   
-
-    with Cluster("Web server"):
-        vm_web = Custom("", "../assets/vagrant.png")
-        with Cluster("Node 2"):
-            nc_frontend_2 = Nextcloud()
-            keydb_2 = Custom("PHP Caching", "../assets/keydb.png")
-        with Cluster("Node 1"):
-            keydb_1 = Custom("PHP Caching", "../assets/keydb.png")
-            nc_frontend_1 = Nextcloud()     
-
-        nc_frontend_1 - Edge(style="dashed") - nc_frontend_2
-        keydb_1 - Edge(color="firebrick", style="dashed", label=" Master - Master") - keydb_2
-        ha_2 - Edge(color="firebrick", style="dashed") - ha_1 
-
-    with Cluster("SQL Load Balancing"):
-        vm_sql_lb = Custom("", "../assets/vagrant.png")
-        with Cluster("Active"):
-            ppSQL = Custom("", "../assets/proxysql.png")
-        with Cluster("Passive"):
-            paSQL = Custom("", "../assets/proxysql.png")           
-
-    with Cluster("Galera cluster"):
-        Custom("", "../assets/vagrant.png")        
-        with Cluster("Primary writer"):
-            primary = Mariadb()
-        with Cluster("Backup writer 2"):
-            bckp1 = DatabaseForMariadbServers()
-        with Cluster("Backup writer 1"):
-            bckp2 = DatabaseForMariadbServers()  
-    
-        paSQL - Edge(color="firebrick") - primary
-        bckp1 - Edge(style="dashed") - paSQL - Edge(style="dashed") - bckp2
-        
-        ppSQL - Edge(color="firebrick") - primary
-        bckp1 - Edge(style="dashed") - ppSQL - Edge(style="dashed") - bckp2
-    
-    out - vm_web_lb - vm_web
-    vm_sql_lb - vm_web - vm_nfs

+ 49 - 0
report/scripts/dia_nc_test_improved.py

@@ -0,0 +1,49 @@
+from diagrams import Cluster, Diagram, Edge
+
+from diagrams.azure.database import DatabaseForMariadbServers
+from diagrams.onprem.network import Internet
+from diagrams.onprem.groupware import Nextcloud
+from diagrams.onprem.monitoring import Grafana, Prometheus
+from diagrams.onprem.network import Haproxy
+import diagrams.generic.storage as storage_1
+from diagrams.azure.identity import Groups
+from diagrams.onprem.storage import Ceph, CephOsd
+
+from diagrams.custom import Custom
+
+mypath="/home/pbarriat/Documents/Boulot/UCL/Brevet/report/assets/"
+
+with Diagram(filename="../assets/dia_nc_test_improved", show=True, direction="TB"):
+    out = Internet("Intranet")    
+    ldap = Groups("LDAP")
+
+    with Cluster("SSL proxy + PHP Caching"):
+        openstack_lb = Custom("", mypath+"openstack.png")
+        lb = [Haproxy("Load Balancing")]
+
+    with Cluster("Web server"):
+        openstack_web = Custom("", mypath+"openstack.png")
+        with Cluster("Node 1"):
+            caching_1 = Custom("PHP Caching", mypath+"keydb.png")
+            nc_frontend_1 = [ Nextcloud(), caching_1]
+        with Cluster("Node 2"):
+            caching_2 = Custom("PHP Caching", mypath+"keydb.png")
+            nc_frontend_2 = [ Nextcloud(), caching_2]
+        caching_1 - Edge(label="Master-Master", style="dashed") - caching_2
+
+    with Cluster("Database"):
+        openstack_db = Custom("", mypath+"openstack.png")
+        nc_db = DatabaseForMariadbServers("Backup writer")
+ 
+    with Cluster("Ceph Storage Cluster"):
+        ceph_cluster = Ceph()
+        ceph_object = CephOsd("Object Gateway")
+        
+    #with Cluster("Control / Monitor"):
+    #    openstack_ctrl = Custom("", mypath+"openstack.png")
+    #    ctrl = [Prometheus(), Grafana()]        
+ 
+    openstack_lb >> openstack_web << openstack_db
+    ceph_cluster >> openstack_web << ldap
+    #openstack_ctrl >> openstack_web
+    out >> lb

+ 45 - 0
report/scripts/dia_nc_test_migration.py

@@ -0,0 +1,45 @@
+from diagrams import Cluster, Diagram, Edge
+
+from diagrams.azure.database import DatabaseForMariadbServers
+
+from diagrams.azure.database import SQLDatabases
+from diagrams.onprem.inmemory import Redis
+from diagrams.onprem.network import Internet
+from diagrams.onprem.groupware import Nextcloud
+from diagrams.onprem.monitoring import Grafana, Prometheus
+from diagrams.onprem.network import Haproxy
+from diagrams.onprem.database import Mariadb
+from diagrams.onprem.storage import Glusterfs
+import diagrams.generic.storage as storage_1
+from diagrams.azure.identity import Groups
+from diagrams.onprem.storage import Ceph, CephOsd
+
+from diagrams.custom import Custom
+
+mypath="/home/pbarriat/Documents/Boulot/UCL/Brevet/report/assets/"
+
+with Diagram(filename="../assets/dia_nc_test_migration", show=True, direction="TB"):
+    out = Internet("Intranet")    
+    ldap = Groups("LDAP")
+    nfs = storage_1.Storage("NFS")
+
+    with Cluster("Load Balancing"):
+        openstack_lb = Custom("", mypath+"openstack.png")
+        lb = [Haproxy("SSL proxy")]
+
+    with Cluster("Web server"):
+        openstack_web = Custom("", mypath+"openstack.png")
+        nc_frontend = [ Nextcloud(), Custom("PHP Caching", mypath+"keydb.png")]
+
+    with Cluster("Database"):
+        openstack_db = Custom("", mypath+"openstack.png")
+        nc_db = DatabaseForMariadbServers("Backup writer")
+ 
+    with Cluster("Ceph Storage Cluster"):
+        ceph_cluster = Ceph()
+        ceph_object = CephOsd("Object Gateway")
+ 
+    openstack_lb >> openstack_web << openstack_db
+    ldap >> openstack_web << nfs
+    ceph_cluster << Edge(label="Migration", color="firebrick") << openstack_web
+    out >> lb