make LoggedIO.close_segment reentrant

If anything blows up in the middle of a (first) invocation of close_segment()
and an exception gets raised, close_segment() may get called again
(e.g. from Repository.__del__ or elsewhere).

Because self._write_fd was only set to None rather late, such a second call
would re-enter the if block.
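
A minimal sketch of that failure mode (illustrative only, not borg's actual code;
the SegmentWriter class and its attributes are made up):

    import os

    class SegmentWriter:
        def __init__(self, path):
            self._write_fd = open(path, 'wb')

        def close_segment(self):
            # old behaviour: the attribute is only cleared at the very end
            if self._write_fd:
                self._write_fd.flush()
                os.fsync(self._write_fd.fileno())  # if this (or anything above) raises ...
                self._write_fd.close()
                self._write_fd = None              # ... this line is never reached

        def __del__(self):
            # a finalizer (compare Repository.__del__) tries to clean up and calls
            # close_segment() again - the if block is entered a second time
            if self._write_fd is not None:
                self.close_segment()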

The new code reads self._write_fd and sets it to None in one tuple assignment,
so a re-entering call sees None and does nothing.

Also, it uses try/finally to make sure the important part (fd.close()) gets executed,
even if the other parts raise exceptions.
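
The same sketch with the new guard (again illustrative, a simplification of the
actual diff below): the fd is taken out of the attribute in a single tuple
assignment, and try/finally keeps the close() unconditional:

    import os

    class SegmentWriter:
        def __init__(self, path):
            self._write_fd = open(path, 'wb')

        def close_segment(self):
            # take the fd and clear the attribute in one step:
            # a re-entering call sees None and falls through
            fd, self._write_fd = self._write_fd, None
            if fd is not None:
                try:
                    fd.flush()
                    os.fsync(fd.fileno())  # may still raise ...
                finally:
                    fd.close()             # ... but the fd gets closed anyway
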
Thomas Waldmann 2017-01-24 23:45:54 +01:00
parent 213e7b7364
commit 5a39d5c4f8
1 changed file with 19 additions and 13 deletions

@@ -821,19 +821,25 @@ class LoggedIO:
         self.close_segment()  # after-commit fsync()
 
     def close_segment(self):
-        if self._write_fd:
-            self.segment += 1
-            self.offset = 0
-            self._write_fd.flush()
-            os.fsync(self._write_fd.fileno())
-            if hasattr(os, 'posix_fadvise'):  # only on UNIX
-                # tell the OS that it does not need to cache what we just wrote,
-                # avoids spoiling the cache for the OS and other processes.
-                os.posix_fadvise(self._write_fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
-            dirname = os.path.dirname(self._write_fd.name)
-            self._write_fd.close()
-            sync_dir(dirname)
-            self._write_fd = None
+        # set self._write_fd to None early to guard against reentry from error handling code paths:
+        fd, self._write_fd = self._write_fd, None
+        if fd is not None:
+            dirname = None
+            try:
+                self.segment += 1
+                self.offset = 0
+                dirname = os.path.dirname(fd.name)
+                fd.flush()
+                fileno = fd.fileno()
+                os.fsync(fileno)
+                if hasattr(os, 'posix_fadvise'):  # only on UNIX
+                    # tell the OS that it does not need to cache what we just wrote,
+                    # avoids spoiling the cache for the OS and other processes.
+                    os.posix_fadvise(fileno, 0, 0, os.POSIX_FADV_DONTNEED)
+            finally:
+                fd.close()
+            if dirname:
+                sync_dir(dirname)
 
 
 MAX_DATA_SIZE = MAX_OBJECT_SIZE - LoggedIO.put_header_fmt.size