ソースを参照

Fix PHP<->S3 troubles

Pierre-Yves Barriat 1 年間 前
コミット
a02f92693d

+ 1 - 0
.gitignore

@@ -7,3 +7,4 @@ commands.txt
 dev/.vagrant/
 dev/provisioning/ansible/hosts
 dev/tools/nextcloud-S3-migration/s3_test.py
+dev/tools/nextcloud-S3-migration/s3_test_multi.py

+ 10 - 6
dev/Migration.md

@@ -30,18 +30,22 @@
     > `sudo -u apache php occ ldap:show-remnants`
   - clean trash
     > `sudo -u apache php occ trashbin:clean --all-users`
+  - remove the old users
+    > based on CISM LDAP status (account inactive for 2 years)
+    - (&(objectClass=person)(expirationDate>=202301010000+0100))
+    - (&(&(objectClass=person)(expirationDate>=202301010000+0100))(uid=%uid))
+  - remove new remnants users (after LDAP filters changes)
+    > change the LDAPinterval (default: 51min for 50 users test) `'ldapUserCleanupInterval' => 5,`
   - remove the external mounts with `02_delete_all_external.sh`
     > take a while (~3h)
-    > `sudo -u apache php occ files:scan -vvv --all`
-  - remove the guests users ?
+  - remove the guests users
     > `sudo -u apache php occ user:list | grep "Guest"`
-  - remove the old users (*impossible because of actual LDAP*)
-    > based on CISM LDAP status (inactive account since 2 years)
+    > `sudo -u apache php occ user:list | grep "@"`
 2. Nextcloud data relief
-  - remove the shared files and folders
   - remove the cache in the DB (unofficial)
     > oc_filecache (> 54789 MB)
-    > `sudo -u apache php occ files:scan -vvv --all --home-only`
+    > `sudo -u apache php occ files:scan -vvv --all`
+  - ?? remove the shared files and folders ??
 3. Start the instance and check it
 4. Halt NC host and:
   - rsync the mysql folder from DB host

+ 1 - 1
dev/provisioning/ansible/database_openstack.yml

@@ -17,6 +17,6 @@
         mariadb_version: "10.5.11"
         #mariadb_sync: true
         #bootstrap_galera: true
-        #databases_users_check: true
+        databases_users_check: true
         mariadb_datadir: "/storage/mysql"
         mariadb_socket: "/storage/mysql/mysql.sock"

+ 0 - 0
dev/tools/01_delete_remnants.sh


+ 0 - 0
dev/tools/02_delete_all_external.sh


+ 0 - 0
dev/tools/03_delete_remained_external.sh


+ 0 - 0
dev/tools/04_remove_guest_users.sh


+ 0 - 0
dev/tools/05_remove_inactive_users.sh


+ 0 - 0
dev/tools/06_remove_too_many_files.sh


+ 53 - 10
dev/tools/nextcloud-S3-migration/localtos3.php

@@ -1,4 +1,5 @@
 <?php
+putenv('LDAPTLS_CACERT=/etc/ssl/certs/cacism3.pem');
 /* *********************************************************************************** */
 /*        2023 code created by Eesger Toering / knoop.frl / geoarchive.eu              */
 /*        GitHub: https://github.com/mrAceT/nextcloud-S3-local-S3-migration            */
@@ -15,8 +16,13 @@
 use Aws\S3\S3Client;
 
 # uncomment this for large file uploads (Amazon advises this for 100Mb+ files)
-#use Aws\S3\MultipartUploader;
-#$MULTIPART_THRESHOLD = 500; #Megabytes
+use Aws\Exception\AwsException;
+use Aws\S3\ObjectUploader;
+use Aws\S3\MultipartUploader;
+use Aws\Exception\MultipartUploadException;
+
+$MULTIPART_THRESHOLD = 1536; #Megabytes
+$MULTIPART_CONCURRENCY = 3;
 
 echo "\n#########################################################################################".
      "\n Migration tool for Nextcloud local to S3 version 0.35".
@@ -33,17 +39,17 @@ $PATH_NEXTCLOUD = $PATH_BASE; // Path of the public Nextcloud directory
 
 $PATH_BACKUP    = $PATH_BASE.'/bak'; // Path for backup of MySQL database (you must create it yourself..)
 
-$OCC_BASE       = 'php81 -d memory_limit=1024M '.$PATH_NEXTCLOUD.'/occ ';
+$OCC_BASE       = 'php -d memory_limit=2048M '.$PATH_NEXTCLOUD.'/occ ';
 // don't forget this one --. (if you don't run the script as the 'clouduser', see first comment at the top)
 #$OCC_BASE       = 'sudo -u apache php81 -d memory_limit=1024M '.$PATH_NEXTCLOUD.'/occ ';
 
-$TEST = '2'; //'admin';//'appdata_oczvcie123w4';
+$TEST = '2'; //'admin';//'appdata_ocspss2ph00r';
 // set to 0 for LIVE!!
 // set to 1 for all data : NO db modifications, with file modifications/uploads/removal
 // set to user name for single user (migration) test
 // set to 2 for complete dry run
 
-$SET_MAINTENANCE = 1; // only in $TEST=0 Nextcloud will be put into maintenance mode
+$SET_MAINTENANCE = 0; // only in $TEST=0 Nextcloud will be put into maintenance mode
 // ONLY when migration is all done you can set this to 0 for the S3-consistency checks
 
 $SHOWINFO = 1; // set to 0 to force much less info (while testing)
@@ -62,7 +68,7 @@ echo "\n".
      "\nSetting up local migration to S3 (sync)...\n";
 
 // Autoload
-require_once(dirname(__FILE__).'/vendor/autoload.php');
+require_once(dirname(__FILE__).'/3rdparty/autoload.php');
 
 echo "\nfirst load the nextcloud config...";
 include($PATH_NEXTCLOUD.'/config/config.php');
@@ -181,7 +187,8 @@ if (empty($TEST)) {
 echo "\ndatabase backup...";
 if (!is_dir($PATH_BACKUP)) { echo "\$PATH_BACKUP folder does not exist\n"; die; }
 
-$process = shell_exec('mysqldump --host='.$CONFIG['dbhost'].
+[$host_clear, $host_port] = explode(':',$CONFIG['dbhost']);
+$process = shell_exec('mysqldump --host='.$host_clear.
                                ' --user='.(empty($SQL_DUMP_USER)?$CONFIG['dbuser']:$SQL_DUMP_USER).
                                ' --password='.escapeshellcmd( empty($SQL_DUMP_PASS)?$CONFIG['dbpassword']:$SQL_DUMP_PASS ).' '.$CONFIG['dbname'].
                                ' > '.$PATH_BACKUP . DIRECTORY_SEPARATOR . 'backup.sql');
@@ -212,7 +219,7 @@ $bucket = $CONFIG['objectstore']['arguments']['bucket'];
 $s3 = new S3Client([
     'version' => 'latest',
     //'endpoint' => 'https://'.$bucket.'.'.$CONFIG['objectstore']['arguments']['hostname'],
-    'endpoint' => 'http://'.$CONFIG['objectstore']['arguments']['hostname'],
+    'endpoint' => $CONFIG['objectstore']['arguments']['hostname'] . ':' . $CONFIG['objectstore']['arguments']['port'],
     //'bucket' => $bucket,
     //'bucket_endpoint' => true,
     'region'  => $CONFIG['objectstore']['arguments']['region'],
@@ -275,6 +282,7 @@ if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC
   }
   while ($row = $result->fetch_assoc()) {
     // Determine correct path
+    //echo "\n\nPEDRO_".$row['id']."_PEDRO_".$row['path']."\n\n";
     if (substr($row['id'], 0, 13) == 'object::user:') {
       $path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 13) . DIRECTORY_SEPARATOR . $row['path'];
     }
@@ -442,7 +450,11 @@ else {
 
        # just for one user? set test = appdata_oczvcie795w3 (system will not go to maintenance nor change database, just test and copy data!!)
         if (is_numeric($TEST) || $TEST == $user ) {
-          #echo "\n".$path."\t".$row['storage_mtime'];
+          $newpath = shell_exec('grep '.$user.' ./uid_login.txt | cut -d" " -f2');
+          if (!empty($newpath)) {
+            $user_directory = rtrim($newpath);
+            $path = $user_directory . DIRECTORY_SEPARATOR . $row['path'];
+          }
           if(file_exists($path) && is_file($path)){
             if ($row['storage_mtime'] < filemtime($path) ) {
               if ($showinfo) { echo $infoLine."\nID:".$object['Key']."\ton S3, but is older then local, upload..."; }
@@ -556,6 +568,12 @@ if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC
       
      # just for one user? set test = appdata_oczvcie795w3 (system will not go to maintenance nor change database, just test and copy data!!)
       if (is_numeric($TEST) || $TEST == $user ) {
+        $newpath = shell_exec('grep '.$user.' ./uid_login.txt | cut -d" " -f2');
+        if (!empty($newpath)) {
+          $user_directory = rtrim($newpath);
+          $path = $user_directory . DIRECTORY_SEPARATOR . $row['path'];
+        }
+        echo "\n".$user."\t".$row['fileid']."\t".$path."\t";
         if(file_exists($path) && is_file($path)){
           if (!empty($TEST) && $TEST == 2) {
             echo ' not uploaded ($TEST = 2)';
@@ -827,13 +845,38 @@ function S3put($s3, $bucket, $vars = array() ) {
   try {
     if (isset($GLOBALS['MULTIPART_THRESHOLD'])
      && filesize($vars['SourceFile']) > $GLOBALS['MULTIPART_THRESHOLD']*1024*1024) {
+        /*
         $uploader = new MultipartUploader($s3,
                                           $vars['SourceFile'],
                                           $vars);
         $result = $uploader->upload();
+        */
+        // Using stream instead of file path
+        $source = fopen($vars['SourceFile'], 'rb');
+        $uploader = new ObjectUploader(
+                             $s3,
+                             $bucket,
+                             $vars['Key'],
+                             $source,
+                             $vars['ACL'],
+                             [ 'concurrency' => $GLOBALS['MULTIPART_CONCURRENCY'], 'part_size' => $GLOBALS['MULTIPART_THRESHOLD']*1024*1024 ]
+        );
+        do {
+          try {
+            $result = $uploader->upload();
+          } catch (MultipartUploadException $e) {
+            rewind($source);
+            $uploader = new MultipartUploader($s3, $source, [
+              'state' => $e->getState(),
+              'acl' => $vars['ACL'],
+            ]);
+          }
+        } while (!isset($result));
+        fclose($source);
+
     } else {
       if (filesize($vars['SourceFile']) > 2*1024*1024*1024) {
-        echo "\n".'WARNING: file \''.$vars['SourceFile'].'\' is larger then 2 Gb, consider enabeling \'MultipartUploader\'';
+        echo "\n".'WARNING: file \''.$vars['SourceFile'].'\' is larger then 2 Gb, consider enabling \'MultipartUploader\'';
       }
       $result = $s3->putObject($vars);
     }

+ 12 - 16
dev/tools/nextcloud-S3-migration/s3_test.php

@@ -2,7 +2,7 @@
 
 use Aws\S3\S3Client;
 
-require_once(dirname(__FILE__).'/vendor/autoload.php');
+require_once(dirname(__FILE__).'/3rdparty/autoload.php');
 
 $CONFIG = dirname(__FILE__).'/storage.config.php';
 
@@ -12,7 +12,7 @@ echo "\nconnect to S3...\n";
 $bucket_name = $CONFIG['objectstore']['arguments']['bucket'];
 $s3 = new S3Client([
     'version' => 'latest',
-    'endpoint' => 'http://'.$CONFIG['objectstore']['arguments']['hostname'],
+    'endpoint' => $CONFIG['objectstore']['arguments']['hostname'].':'.$CONFIG['objectstore']['arguments']['port'],
     'region'  => $CONFIG['objectstore']['arguments']['region'],
     'credentials' => [
         'key' => $CONFIG['objectstore']['arguments']['key'],
@@ -20,9 +20,10 @@ $s3 = new S3Client([
     ],
     'use_path_style_endpoint' => $CONFIG['objectstore']['arguments']['use_path_style']
 ]);
+echo "...succeed\n";
 
-$buckets = $s3->listBuckets([
-]);
+/*
+$buckets = $s3->listBuckets([]);
 echo $buckets;
 echo "\n";
 try {
@@ -40,26 +41,18 @@ echo "The contents of your bucket $bucket_name are: \n\n";
 
 $objects = S3list($s3, $bucket_name);
 
-//$objects = $s3->listObjectsV2([
-//       'Bucket' => $bucket_name,
-//]);
-//foreach ($objects['Contents'] as $object){
-//    echo "{$object['Key']}\t{$object['LastModified']}\n";
-//}
-//echo "\n";
-
 echo 'S3list:'.print_r($objects);
-echo "\n";
-
-/*
 $result_s3 =  S3put($s3, $bucket, [
                           'SourceFile' => './nextcloud_25.tar.gz',
                         ]);
 echo 'S3put:'.$result_s3;
+*/
 
 $bucket = 'nextcloud';
 $file_Path = './nextcloud_25.tar.gz';
 $key = basename($file_Path);
+echo "\nCopy file : ".$file_Path."\n";
+$start_time = microtime(true);
 try{
     $result = $s3->putObject([
         'Bucket'     => $bucket,
@@ -70,7 +63,10 @@ try{
 } catch (S3Exception $e) {
     echo $e->getMessage() . "\n";
 }
-*/
+$end_time = microtime(true);
+$execution_time = ($end_time - $start_time);
+echo " Execution time of script = ".$execution_time." sec\n";
+echo 'S3put:'.$result_s3;
 
 //#########################################################################################
 function S3list($s3, $bucket, $maxIteration = 10000000) {

+ 66 - 0
dev/tools/nextcloud-S3-migration/s3_test_multi.php

@@ -0,0 +1,66 @@
+<?php
+
+use Aws\S3\S3Client;
+use Aws\Exception\AwsException;
+use Aws\S3\ObjectUploader;
+use Aws\S3\MultipartUploader;
+use Aws\Exception\MultipartUploadException;
+
+require_once(dirname(__FILE__).'/3rdparty/autoload.php');
+
+$CONFIG = dirname(__FILE__).'/storage.config.php';
+
+include($CONFIG);
+
+echo "\nconnect to S3...\n";
+$bucket_name = $CONFIG['objectstore']['arguments']['bucket'];
+$s3Client = new S3Client([
+    'version' => 'latest',
+    'endpoint' => $CONFIG['objectstore']['arguments']['hostname'],
+    'region'  => $CONFIG['objectstore']['arguments']['region'],
+    'credentials' => [
+        'key' => $CONFIG['objectstore']['arguments']['key'],
+        'secret' => $CONFIG['objectstore']['arguments']['secret'],
+    ],
+    'use_path_style_endpoint' => $CONFIG['objectstore']['arguments']['use_path_style']
+]);
+
+$bucket = 'nextcloud';
+$file_Path = '/storage/nextcloud/test_pedro.gpkg';
+$files = [$file_Path];
+$key = basename($file_Path);
+
+echo "\nPHP: copy file : ".$file_Path."\n";
+
+// Using stream instead of file path
+$source = fopen($file_Path, 'rb');
+
+$uploader = new ObjectUploader(
+    $s3Client,
+    $bucket,
+    $key,
+    $source,
+    'private',
+    [ 'concurrency' => 5, 'part_size' => 1536*1024*1024 ]
+);
+
+$start_time = microtime(true);
+do {
+    try {
+        $result = $uploader->upload();
+        if ($result["@metadata"]["statusCode"] == '200') {
+                print('File successfully uploaded to ' . $result["ObjectURL"]);
+        }
+    } catch (MultipartUploadException $e) {
+        rewind($source);
+        $uploader = new MultipartUploader($s3Client, $source, [
+            'state' => $e->getState(),
+            'acl' => 'public-read',
+        ]);
+    }
+} while (!isset($result));
+fclose($source);
+
+$end_time = microtime(true);
+$execution_time = ($end_time - $start_time);
+echo "\nExecution time of script = ".$execution_time." sec\n";

+ 4 - 4
dev/tools/nextcloud-S3-migration/storage.config.php

@@ -5,10 +5,10 @@ $CONFIG = array (
           'arguments' => array(
                   'bucket' => 'nextcloud', 
                   'autocreate' => true,
-                  'key' => 'Z3Z9QSQ315XANF0ZUXQS', 
-                  'secret' => 'IKJCPixZWj7i2JxvhMFnSYCXSgS4qZ7kC9DigfmX', 
-                  'hostname' => '192.168.56.71', 
-                  'port' => 80,
+                  'key' => '********************', 
+                  'secret' => '*********************', 
+                  'hostname' => 'http://192.168.64.51', 
+                  'port' => 7480,
                   'use_ssl' => false,
                   'region' => 'eu-west-3', 
                   'use_path_style' => true