diff --git a/doc/bugs/git_annex_copy_to_s3_out_of_memory/comment_1_5c0b73214c8f0ecb197cf0e109ff2729._comment b/doc/bugs/git_annex_copy_to_s3_out_of_memory/comment_1_5c0b73214c8f0ecb197cf0e109ff2729._comment new file mode 100644 index 0000000000..643ece91a7 --- /dev/null +++ b/doc/bugs/git_annex_copy_to_s3_out_of_memory/comment_1_5c0b73214c8f0ecb197cf0e109ff2729._comment @@ -0,0 +1,18 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 1""" + date="2023-09-26T16:56:06Z" + content=""" +Is chunking or partsize configured on the S3 special remote? +The output of `git-annex info fcp-indi` will tell us that. + +How large are the files you're sending? Are any particularly large, e.g. +larger than available RAM? + +Am I understanding correctly that you're sending 4 million files to S3? +And do these failures happen after a significant number of those files have +been sent, or more randomly? + +Does git-annex's memory use show a trend of increasing as more files are +sent? +"""]]