2016-04-17 01:15:19 +00:00
|
|
|
# this set must be kept complete, otherwise the RobustUnpacker might malfunction:
# (RobustUnpacker uses these keys to re-synchronize on valid item dicts)
ITEM_KEYS = frozenset(['path', 'source', 'rdev', 'chunks', 'chunks_healthy', 'hardlink_master',
                       'mode', 'user', 'group', 'uid', 'gid', 'mtime', 'atime', 'ctime', 'size',
                       'xattrs', 'bsdflags', 'acl_nfs4', 'acl_access', 'acl_default', 'acl_extended',
                       'part'])
|
2016-06-12 21:36:56 +00:00
|
|
|
|
|
|
|
# this is the set of keys that are always present in items:
REQUIRED_ITEM_KEYS = frozenset(['path', 'mtime', ])
|
|
|
|
|
|
|
|
# this set must be kept complete, otherwise rebuild_manifest might malfunction:
ARCHIVE_KEYS = frozenset(['version', 'name', 'items', 'cmdline', 'hostname', 'username', 'time', 'time_end',
                          'comment', 'chunker_params',
                          'recreate_cmdline', 'recreate_source_id', 'recreate_args'])
|
|
|
|
|
|
|
|
# this is the set of keys that are always present in archives:
REQUIRED_ARCHIVE_KEYS = frozenset(['version', 'name', 'items', 'cmdline', 'time', ])
|
2016-04-17 01:15:19 +00:00
|
|
|
|
2017-06-09 14:49:30 +00:00
|
|
|
# default umask, overridden by --umask, defaults to read/write only for owner
UMASK_DEFAULT = 0o077
|
|
|
|
|
|
|
|
# cache directory marker file name and its required signature content,
# per the Cache Directory Tagging Standard (CACHEDIR.TAG)
CACHE_TAG_NAME = 'CACHEDIR.TAG'
CACHE_TAG_CONTENTS = b'Signature: 8a477f597d28d172789f06886806bc55'
|
|
|
|
|
Improve LoggedIO write performance, make commit mechanism more solid
- Instead of very small (5 MB-ish) segment files, use larger ones
- Request asynchronous write-out or write-through (TODO) where it is supported,
to achieve a continuously high throughput for writes
- Instead of depending on ordered writes (write data, commit tag, sync)
for consistency, do a double-sync commit as more serious RDBMS also do
i.e. write data, sync, write commit tag, sync
Since commits are very expensive in Borg at the moment this makes no
difference performance-wise.
New platform APIs: SyncFile, sync_dir
[x] Naive implementation (equivalent to what Borg did before)
[x] Linux implementation
[ ] Windows implementation
[-] OSX implementation (F_FULLFSYNC)
2016-05-14 20:46:41 +00:00
|
|
|
# A large, but not unreasonably large segment size. Always less than 2 GiB (for legacy file systems). We choose
# 500 MiB which means that no indirection from the inode is needed for typical Linux file systems.
# Note that this is a soft-limit and can be exceeded (worst case) by a full maximum chunk size and some metadata
# bytes. That's why it's 500 MiB instead of 512 MiB.
DEFAULT_MAX_SEGMENT_SIZE = 500 * 1024 * 1024
|
|
|
|
|
2017-02-22 23:34:40 +00:00
|
|
|
# 20 MiB minus 41 bytes for a Repository header (because the "size" field in the Repository includes
# the header, and the total size was set to 20 MiB).
MAX_DATA_SIZE = 20971479

# MAX_OBJECT_SIZE = <20 MiB (MAX_DATA_SIZE) + 41 bytes for a Repository PUT header, which consists of
# a 1 byte tag ID, 4 byte CRC, 4 byte size and 32 bytes for the ID.
MAX_OBJECT_SIZE = MAX_DATA_SIZE + 41  # see LoggedIO.put_header_fmt.size assertion in repository module
assert MAX_OBJECT_SIZE == 20 * 1024 * 1024
|
2017-06-24 16:31:34 +00:00
|
|
|
|
|
|
|
# borg.remote read() buffer size
BUFSIZE = 10 * 1024 * 1024
|
|
|
|
|
2017-06-23 03:56:41 +00:00
|
|
|
# to use a safe, limited unpacker, we need to set an upper limit to the archive count in the manifest.
# this does not mean that you can always really reach that number, because it also needs to be less than
# MAX_DATA_SIZE or it will trigger the check for that.
MAX_ARCHIVES = 400000
|
|
|
|
|
2017-06-24 16:31:34 +00:00
|
|
|
# repo.list() / .scan() result count limit the borg client uses
LIST_SCAN_LIMIT = 100000
|
|
|
|
|
2017-06-03 19:54:41 +00:00
|
|
|
# default number of segment files stored per repository data sub-directory
DEFAULT_SEGMENTS_PER_DIR = 1000
|
2016-04-17 01:15:19 +00:00
|
|
|
|
|
|
|
CHUNK_MIN_EXP = 19  # 2**19 == 512kiB
CHUNK_MAX_EXP = 23  # 2**23 == 8MiB
HASH_WINDOW_SIZE = 0xfff  # 4095B
HASH_MASK_BITS = 21  # results in ~2MiB chunks statistically

# defaults, use --chunker-params to override
CHUNKER_PARAMS = (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE)

# chunker params for the items metadata stream, finer granularity
ITEMS_CHUNKER_PARAMS = (15, 19, 17, HASH_WINDOW_SIZE)
|
2016-04-17 01:15:19 +00:00
|
|
|
|
implement files cache mode control, fixes #911
You can now control the files cache mode using this option:
--files-cache={ctime,mtime,size,inode,rechunk,disabled}*
(only some combinations are supported)
Previously, only these modes were supported:
- mtime,size,inode (default of borg < 1.1.0rc4)
- mtime,size (by using --ignore-inode)
- disabled (by using --no-files-cache)
Now, you additionally get:
- ctime alternatively to mtime (more safe), e.g.:
ctime,size,inode (this is the new default of borg >= 1.1.0rc4)
- rechunk (consider all files as changed, rechunk them)
Deprecated:
- --ignore-inode (use modes without "inode")
- --no-files-cache (use "disabled" mode)
The tests needed some changes:
- previously, we use os.utime() to set a files mtime (atime) to specific
values, but that does not work for ctime.
- now use time.sleep() to create the "latest file" that usually does
not end up in the files cache (see FAQ)
2017-09-11 00:54:52 +00:00
|
|
|
# operating mode of the files cache (for fast skipping of unchanged files)
DEFAULT_FILES_CACHE_MODE_UI = 'ctime,size,inode'
DEFAULT_FILES_CACHE_MODE = 'cis'  # == CacheMode(DEFAULT_FILES_CACHE_MODE_UI)
|
|
|
|
|
2016-04-17 01:15:19 +00:00
|
|
|
# return codes returned by borg command
# when borg is killed by signal N, rc = 128 + N
EXIT_SUCCESS = 0  # everything done, no problems
EXIT_WARNING = 1  # reached normal end of operation, but there were issues
EXIT_ERROR = 2  # terminated abruptly, did not reach end of operation
|
|
|
|
|
2017-09-05 02:44:38 +00:00
|
|
|
# never use datetime.isoformat(), it is evil. always use one of these:
# datetime.strftime(ISO_FORMAT)  # output always includes .microseconds
# datetime.strftime(ISO_FORMAT_NO_USECS)  # output never includes microseconds
ISO_FORMAT_NO_USECS = '%Y-%m-%dT%H:%M:%S'
ISO_FORMAT = ISO_FORMAT_NO_USECS + '.%f'
|
|
|
|
|
2016-04-17 01:15:19 +00:00
|
|
|
# 78-column horizontal rule used as a separator in textual output
DASHES = '-' * 78
|
|
|
|
|
|
|
|
# iteration count used for PBKDF2 key derivation
PBKDF2_ITERATIONS = 100000
|
2016-11-11 20:24:16 +00:00
|
|
|
|
|
|
|
|
|
|
|
# contents of the README file placed inside a freshly created repository
REPOSITORY_README = """This is a Borg Backup repository.
See https://borgbackup.readthedocs.io/
"""
|
|
|
|
|
|
|
|
# contents of the README file placed inside a freshly created cache directory
CACHE_README = """This is a Borg Backup cache.
See https://borgbackup.readthedocs.io/
"""
|