Always handle EOVERFLOW by falling back to the generic copy loop
Previously, EOVERFLOW handling was only applied to the io::copy specialization,
but not to fs::copy, which shares the same code.

Additionally, we lower the chunk size to 1GB, since we have a user report
that older kernels may return EINVAL when passing 0x8000_0000,
but smaller values succeed.
the8472 committed Nov 13, 2020
1 parent 4854d41 commit bbfa92c
Showing 2 changed files with 9 additions and 11 deletions.
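
A minimal sketch of the pattern described in the commit message (illustrative names only — `CopyOutcome`, `kernel_copy_to`, and `copy_with_fallback` are assumptions, not std internals): the kernel-backed fast path reports how many bytes it already wrote before giving up (e.g. on EOVERFLOW), and the caller always finishes with the generic userspace copy loop, adding the two counts instead of asserting that nothing was written.

```rust
use std::io::{self, Read, Write};

/// Illustrative stand-in for the internal `CopyResult` used in kernel_copy.rs.
enum CopyOutcome {
    /// The kernel-backed path finished (successfully or with a hard error).
    Ended(io::Result<u64>),
    /// The kernel-backed path stopped early (e.g. on EOVERFLOW) after writing
    /// this many bytes; the caller must finish with a generic loop.
    Fallback(u64),
}

/// Hypothetical kernel-backed fast path; in std this wraps
/// copy_file_range()/sendfile(). Here it always defers to the fallback.
fn kernel_copy_to<R: Read, W: Write>(_reader: &mut R, _writer: &mut W) -> CopyOutcome {
    CopyOutcome::Fallback(0)
}

fn copy_with_fallback<R: Read, W: Write>(reader: &mut R, writer: &mut W) -> io::Result<u64> {
    match kernel_copy_to(reader, writer) {
        CopyOutcome::Ended(result) => result,
        // No `assert_eq!(0, written)` any more: bytes copied by the kernel
        // path before the fallback are simply added to the generic copy.
        CopyOutcome::Fallback(written) => Ok(written + io::copy(reader, writer)?),
    }
}

fn main() -> io::Result<()> {
    let mut src: &[u8] = b"hello world";
    let mut dst: Vec<u8> = Vec::new();
    assert_eq!(copy_with_fallback(&mut src, &mut dst)?, 11);
    Ok(())
}
```

The fs.rs hunk below applies this to `fs::copy`: the `assert_eq!(0, written)` is dropped and the partially written count is added to the generic copy's result.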
10 changes: 4 additions & 6 deletions library/std/src/sys/unix/fs.rs
@@ -1199,12 +1199,10 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {

match copy_regular_files(reader.as_raw_fd(), writer.as_raw_fd(), max_len) {
CopyResult::Ended(result) => result,
CopyResult::Fallback(written) => {
// fallback is only > 0 on EOVERFLOW, which shouldn't happen
// because the copy loop starts at file offset 0 and counts down from `len`
assert_eq!(0, written);
io::copy::generic_copy(&mut reader, &mut writer)
}
CopyResult::Fallback(written) => match io::copy::generic_copy(&mut reader, &mut writer) {
Ok(bytes) => Ok(bytes + written),
Err(e) => Err(e),
},
}
}

10 changes: 5 additions & 5 deletions library/std/src/sys/unix/kernel_copy.rs
@@ -438,7 +438,6 @@ pub(super) enum CopyResult {
/// Callers must handle fallback to a generic copy loop.
/// `Fallback` may indicate a non-zero number of bytes already written
/// if one of the files' cursors + `max_len` would exceed u64::MAX (`EOVERFLOW`).
/// If the initial file offset was 0 then `Fallback` will only contain `0`.
pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
use crate::cmp;

@@ -462,10 +461,10 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
while written < max_len {
let copy_result = if has_copy_file_range {
let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
// cap to 2GB chunks in case u64::MAX is passed in as file size and the file has a non-zero offset
// this allows us to copy large chunks without hitting the limit,
// unless someone sets a file offset close to u64::MAX - 2GB, in which case the fallback would kick in
let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x8000_0000usize);
// cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
// this allows us to copy large chunks without hitting EOVERFLOW,
// unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
let copy_result = unsafe {
// We actually don't have to adjust the offsets,
// because copy_file_range adjusts the file offset automatically
@@ -560,6 +559,7 @@ fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) ->

let mut written = 0u64;
while written < len {
// according to its manpage that's the maximum size sendfile() will copy per invocation
let chunk_size = crate::cmp::min(len - written, 0x7ffff000_u64) as usize;

let result = match mode {
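
To illustrate the kernel_copy.rs side, here is a hedged sketch of the chunked copy_file_range(2) loop with the lowered 1GB cap, assuming Linux and the `libc` crate; the function name is illustrative and error handling is reduced to the EOVERFLOW case this commit is about (the real code also handles ENOSYS, EXDEV, EPERM, and similar):

```rust
use std::io;
use std::os::unix::io::RawFd;

const CHUNK_CAP: u64 = 0x4000_0000; // 1GB, the value this commit lowers to

enum CopyResult {
    Ended(io::Result<u64>),
    Fallback(u64),
}

/// Illustrative chunked copy loop around copy_file_range(2); not the std code.
fn copy_regular_files_sketch(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
    let mut written = 0u64;
    while written < max_len {
        // Cap each request at 1GB so `file offset + len` stays well below the
        // point where the kernel reports EOVERFLOW, and below the 2GB request
        // size that older kernels have been reported to reject with EINVAL.
        let bytes_to_copy = std::cmp::min(max_len - written, CHUNK_CAP) as usize;
        // Null offset pointers: copy_file_range advances both file offsets itself.
        let result = unsafe {
            libc::copy_file_range(
                reader,
                std::ptr::null_mut(),
                writer,
                std::ptr::null_mut(),
                bytes_to_copy,
                0,
            )
        };
        match result {
            0 => return CopyResult::Ended(Ok(written)), // EOF before max_len
            n if n > 0 => written += n as u64,
            _ => {
                let err = io::Error::last_os_error();
                return match err.raw_os_error() {
                    // The file offset plus len would overflow: hand back what
                    // was copied so far and let the generic loop finish.
                    Some(libc::EOVERFLOW) => CopyResult::Fallback(written),
                    _ => CopyResult::Ended(Err(err)),
                };
            }
        }
    }
    CopyResult::Ended(Ok(written))
}
```

Lowering the cap from 2GB to 1GB roughly doubles the number of copy_file_range calls for very large files, but avoids the EINVAL reported on older kernels for 0x8000_0000-sized requests.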
