@@ -1,4 +1,5 @@
<?php
+putenv('LDAPTLS_CACERT=/etc/ssl/certs/cacism3.pem');
/* *********************************************************************************** */
/* 2023 code created by Eesger Toering / knoop.frl / geoarchive.eu */
/* GitHub: https://github.com/mrAceT/nextcloud-S3-local-S3-migration */
@@ -15,8 +16,13 @@
use Aws\S3\S3Client;

# uncomment this for large file uploads (Amazon advises this for 100MB+ files)
-#use Aws\S3\MultipartUploader;
-#$MULTIPART_THRESHOLD = 500; #Megabytes
+use Aws\Exception\AwsException;
+use Aws\S3\ObjectUploader;
+use Aws\S3\MultipartUploader;
+use Aws\Exception\MultipartUploadException;
+
+$MULTIPART_THRESHOLD = 1536; #Megabytes
+$MULTIPART_CONCURRENCY = 3;

echo "\n#########################################################################################".
"\n Migration tool for Nextcloud local to S3 version 0.35".
@@ -33,17 +39,17 @@ $PATH_NEXTCLOUD = $PATH_BASE; // Path of the public Nextcloud directory

$PATH_BACKUP = $PATH_BASE.'/bak'; // Path for backup of MySQL database (you must create it yourself..)

-$OCC_BASE = 'php81 -d memory_limit=1024M '.$PATH_NEXTCLOUD.'/occ ';
+$OCC_BASE = 'php -d memory_limit=2048M '.$PATH_NEXTCLOUD.'/occ ';
// don't forget this one --. (if you don't run the script as the 'clouduser', see first comment at the top)
#$OCC_BASE = 'sudo -u apache php81 -d memory_limit=1024M '.$PATH_NEXTCLOUD.'/occ ';

-$TEST = '2'; //'admin';//'appdata_oczvcie123w4';
+$TEST = '2'; //'admin';//'appdata_ocspss2ph00r';
// set to 0 for LIVE!!
// set to 1 for all data : NO db modifications, with file modifications/uploads/removal
// set to user name for single user (migration) test
// set to 2 for complete dry run

-$SET_MAINTENANCE = 1; // only in $TEST=0 Nextcloud will be put into maintenance mode
+$SET_MAINTENANCE = 0; // only in $TEST=0 Nextcloud will be put into maintenance mode
// ONLY when migration is all done you can set this to 0 for the S3-consistency checks

$SHOWINFO = 1; // set to 0 to force much less info (while testing)
@@ -62,7 +68,7 @@ echo "\n".
"\nSetting up local migration to S3 (sync)...\n";

// Autoload
-require_once(dirname(__FILE__).'/vendor/autoload.php');
+require_once(dirname(__FILE__).'/3rdparty/autoload.php');

echo "\nfirst load the nextcloud config...";
include($PATH_NEXTCLOUD.'/config/config.php');
@@ -181,7 +187,8 @@ if (empty($TEST)) {
echo "\ndatabase backup...";
if (!is_dir($PATH_BACKUP)) { echo "\$PATH_BACKUP folder does not exist\n"; die; }

-$process = shell_exec('mysqldump --host='.$CONFIG['dbhost'].
+[$host_clear, $host_port] = explode(':',$CONFIG['dbhost']);
+$process = shell_exec('mysqldump --host='.$host_clear.
' --user='.(empty($SQL_DUMP_USER)?$CONFIG['dbuser']:$SQL_DUMP_USER).
' --password='.escapeshellcmd( empty($SQL_DUMP_PASS)?$CONFIG['dbpassword']:$SQL_DUMP_PASS ).' '.$CONFIG['dbname'].
' > '.$PATH_BACKUP . DIRECTORY_SEPARATOR . 'backup.sql');
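
Note on the host split above: Nextcloud's dbhost may be a bare hostname, a host:port pair, or a host:socket value. When no ':' is present, the destructuring raises an undefined-array-key warning for $host_port. A slightly more defensive variant (a sketch, not part of the patch) pads the result first:

// Sketch: tolerate a dbhost without an explicit ':port' suffix.
[$host_clear, $host_port] = array_pad(explode(':', $CONFIG['dbhost'], 2), 2, null);
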
@@ -212,7 +219,7 @@ $bucket = $CONFIG['objectstore']['arguments']['bucket'];
$s3 = new S3Client([
'version' => 'latest',
//'endpoint' => 'https://'.$bucket.'.'.$CONFIG['objectstore']['arguments']['hostname'],
- 'endpoint' => 'http://'.$CONFIG['objectstore']['arguments']['hostname'],
+ 'endpoint' => $CONFIG['objectstore']['arguments']['hostname'] . ':' . $CONFIG['objectstore']['arguments']['port'],
//'bucket' => $bucket,
//'bucket_endpoint' => true,
'region' => $CONFIG['objectstore']['arguments']['region'],
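
Note on the endpoint change above: dropping the scheme means the value now depends on how 'hostname' is stored in config.php, and the AWS SDK for PHP generally expects 'endpoint' to be a full URI. A sketch that builds one explicitly, assuming the objectstore arguments also carry the standard Nextcloud 'use_ssl' and 'port' keys:

// Sketch: derive a full endpoint URI from the Nextcloud objectstore arguments.
$s3args   = $CONFIG['objectstore']['arguments'];
$scheme   = empty($s3args['use_ssl']) ? 'http' : 'https';
$endpoint = $scheme.'://'.$s3args['hostname'].':'.$s3args['port'];
// then pass: 'endpoint' => $endpoint,
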
@@ -275,6 +282,7 @@ if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC
}
while ($row = $result->fetch_assoc()) {
// Determine correct path
+ //echo "\n\nPEDRO_".$row['id']."_PEDRO_".$row['path']."\n\n";
if (substr($row['id'], 0, 13) == 'object::user:') {
$path = $PATH_DATA . DIRECTORY_SEPARATOR . substr($row['id'], 13) . DIRECTORY_SEPARATOR . $row['path'];
}
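
As a concrete example of the path logic above: a storages id of object::user:alice (the prefix object::user: is exactly 13 characters, hence the substr offsets) combined with a filecache path of files/photo.jpg resolves to $PATH_DATA/alice/files/photo.jpg.
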
@@ -442,7 +450,11 @@ else {

# just for one user? set test = appdata_oczvcie795w3 (system will not go to maintenance nor change database, just test and copy data!!)
if (is_numeric($TEST) || $TEST == $user ) {
- #echo "\n".$path."\t".$row['storage_mtime'];
+ $newpath = shell_exec('grep '.$user.' ./uid_login.txt | cut -d" " -f2');
+ if (!empty($newpath)) {
+ $user_directory = rtrim($newpath);
+ $path = $user_directory . DIRECTORY_SEPARATOR . $row['path'];
+ }
if(file_exists($path) && is_file($path)){
if ($row['storage_mtime'] < filemtime($path) ) {
if ($showinfo) { echo $infoLine."\nID:".$object['Key']."\ton S3, but is older than local, upload..."; }
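
The grep/cut pair above implies a helper file uid_login.txt next to the script, holding one mapping per line of the form "<user id> <local home directory>", separated by a single space (a hypothetical line: myuserid /data/nextcloud/myuserid). A shell-free equivalent of that lookup, sketched under the same assumption, could look like this:

// Sketch: resolve a user's local home directory from uid_login.txt without shell_exec.
// Assumes one "uid directory" pair per line, space separated (the format the grep|cut expects).
function lookupUserDirectory(string $user, string $mapFile = './uid_login.txt'): ?string {
    foreach (file($mapFile, FILE_IGNORE_NEW_LINES | FILE_SKIP_EMPTY_LINES) ?: [] as $line) {
        [$uid, $dir] = array_pad(explode(' ', $line, 2), 2, null);
        if ($uid === $user && $dir !== null) {
            return rtrim($dir);
        }
    }
    return null;
}

When the lookup returns a directory, $path can be rebuilt exactly as in the added lines ($user_directory . DIRECTORY_SEPARATOR . $row['path']); it also sidesteps the quoting issues an unescaped $user could cause inside shell_exec.
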
@@ -556,6 +568,12 @@ if (!$result = $mysqli->query("SELECT `ST`.`id`, `FC`.`fileid`, `FC`.`path`, `FC

# just for one user? set test = appdata_oczvcie795w3 (system will not go to maintenance nor change database, just test and copy data!!)
if (is_numeric($TEST) || $TEST == $user ) {
+ $newpath = shell_exec('grep '.$user.' ./uid_login.txt | cut -d" " -f2');
+ if (!empty($newpath)) {
+ $user_directory = rtrim($newpath);
+ $path = $user_directory . DIRECTORY_SEPARATOR . $row['path'];
+ }
+ echo "\n".$user."\t".$row['fileid']."\t".$path."\t";
if(file_exists($path) && is_file($path)){
if (!empty($TEST) && $TEST == 2) {
echo ' not uploaded ($TEST = 2)';
@@ -827,13 +845,38 @@ function S3put($s3, $bucket, $vars = array() ) {
try {
if (isset($GLOBALS['MULTIPART_THRESHOLD'])
&& filesize($vars['SourceFile']) > $GLOBALS['MULTIPART_THRESHOLD']*1024*1024) {
+ /*
$uploader = new MultipartUploader($s3,
$vars['SourceFile'],
$vars);
$result = $uploader->upload();
+ */
+ // Using stream instead of file path
+ $source = fopen($vars['SourceFile'], 'rb');
+ $uploader = new ObjectUploader(
+ $s3,
+ $bucket,
+ $vars['Key'],
+ $source,
+ $vars['ACL'],
+ [ 'concurrency' => $GLOBALS['MULTIPART_CONCURRENCY'], 'part_size' => $GLOBALS['MULTIPART_THRESHOLD']*1024*1024 ]
+ );
+ do {
+ try {
+ $result = $uploader->upload();
+ } catch (MultipartUploadException $e) {
+ rewind($source);
+ $uploader = new MultipartUploader($s3, $source, [
+ 'state' => $e->getState(),
+ 'acl' => $vars['ACL'],
+ ]);
+ }
+ } while (!isset($result));
+ fclose($source);
+
} else {
if (filesize($vars['SourceFile']) > 2*1024*1024*1024) {
- echo "\n".'WARNING: file \''.$vars['SourceFile'].'\' is larger then 2 Gb, consider enabeling \'MultipartUploader\'';
|
|
|
+ echo "\n".'WARNING: file \''.$vars['SourceFile'].'\' is larger then 2 Gb, consider enabling \'MultipartUploader\'';
|
|
|
}
$result = $s3->putObject($vars);
}
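
For reference on the chosen sizes (assuming the target object store follows standard S3 multipart limits of 5 MB to 5 GB per part and at most 10,000 parts per upload): a 1536 MB part_size stays well inside the per-part ceiling, and even a maximum-size 5 TB object would need only around 3,400 parts.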