2015-12-14 20:23:52 +00:00
|
|
|
import argparse
|
2016-03-25 06:44:09 +00:00
|
|
|
import collections
|
2014-03-21 21:12:15 +00:00
|
|
|
import functools
|
2016-03-12 11:40:39 +00:00
|
|
|
import hashlib
|
2015-05-14 14:46:44 +00:00
|
|
|
import inspect
|
Print implied output without --info/-v
There are persistent questions why output from options like --list
and --stats doesn't show up. Also, borg currently isn't able to
show *just* the output for a given option (--list, --stats,
--show-rc, --show-version, or --progress), without other INFO level
messages.
The solution is to use more granular loggers, so that messages
specific to a given option goes to a logger designated for that
option. That option-specific logger can then be configured
separately from the regular loggers.
Those option-specific loggers can also be used as a hook in a
BORG_LOGGING_CONF config file to log the --list output to a separate
file, or send --stats output to a network socket where some daemon
could analyze it.
Steps:
- create an option-specific logger for each of the implied output options
- modify the messages specific to each option to go to the correct logger
- if an implied output option is passed, change the option-specific
logger (only) to log at INFO level
- test that root logger messages don't come through option-specific loggers
They shouldn't, per https://docs.python.org/3/howto/logging.html#logging-flow
but test just the same. Particularly test a message that can come from
remote repositories.
Fixes #526, #573, #665, #824
2016-05-18 02:59:58 +00:00
|
|
|
import logging
|
2010-10-25 18:22:20 +00:00
|
|
|
import os
|
2016-05-01 23:12:15 +00:00
|
|
|
import re
|
2016-01-28 20:59:24 +00:00
|
|
|
import shlex
|
2015-05-14 14:46:44 +00:00
|
|
|
import signal
|
2010-10-30 11:44:25 +00:00
|
|
|
import stat
|
2016-04-27 23:28:43 +00:00
|
|
|
import subprocess
|
2010-10-16 09:45:36 +00:00
|
|
|
import sys
|
2014-02-26 22:13:48 +00:00
|
|
|
import textwrap
|
2015-04-01 21:12:06 +00:00
|
|
|
import traceback
|
2016-05-30 23:18:03 +00:00
|
|
|
from binascii import unhexlify
|
|
|
|
from datetime import datetime
|
|
|
|
from itertools import zip_longest
|
|
|
|
|
|
|
|
from .logger import create_logger, setup_logging
|
|
|
|
# Module-level logger; presumably named after this module by create_logger()
# (see .logger) and configured later via setup_logging().
logger = create_logger()
|
2010-02-23 21:12:22 +00:00
|
|
|
|
2015-05-22 17:21:41 +00:00
|
|
|
from . import __version__
|
2016-05-30 23:18:03 +00:00
|
|
|
from . import helpers
|
2016-07-04 17:07:37 +00:00
|
|
|
from .archive import Archive, ArchiveChecker, ArchiveRecreater, Statistics, is_special
|
|
|
|
from .archive import BackupOSError, CHUNKER_PARAMS
|
2016-05-30 23:18:03 +00:00
|
|
|
from .cache import Cache
|
|
|
|
from .constants import * # NOQA
|
2016-06-12 21:36:56 +00:00
|
|
|
from .helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
|
|
|
|
from .helpers import Error, NoManifestError
|
2016-06-25 15:18:14 +00:00
|
|
|
from .helpers import location_validator, archivename_validator, ChunkerParams, CompressionSpec, PrefixSpec
|
2016-06-21 13:18:19 +00:00
|
|
|
from .helpers import BaseFormatter, ItemFormatter, ArchiveFormatter, format_time, format_file_size, format_archive
|
2016-05-30 22:33:13 +00:00
|
|
|
from .helpers import safe_encode, remove_surrogates, bin_to_hex
|
|
|
|
from .helpers import prune_within, prune_split
|
|
|
|
from .helpers import to_localtime, timestamp
|
|
|
|
from .helpers import get_cache_dir
|
|
|
|
from .helpers import Manifest
|
|
|
|
from .helpers import update_excludes, check_extension_modules
|
|
|
|
from .helpers import dir_is_tagged, is_slow_msgpack, yes, sysinfo
|
|
|
|
from .helpers import log_multi
|
|
|
|
from .helpers import parse_pattern, PatternMatcher, PathPrefixPattern
|
2016-07-14 00:11:11 +00:00
|
|
|
from .helpers import signal_handler
|
2016-07-27 22:29:23 +00:00
|
|
|
from .helpers import ErrorIgnoringTextIOWrapper
|
2016-05-31 23:45:45 +00:00
|
|
|
from .item import Item
|
2016-01-15 05:34:09 +00:00
|
|
|
from .key import key_creator, RepoKey, PassphraseKey
|
2016-05-30 23:18:03 +00:00
|
|
|
from .platform import get_flags
|
2016-01-16 23:28:54 +00:00
|
|
|
from .remote import RepositoryServer, RemoteRepository, cache_if_remote
|
2016-05-30 23:18:03 +00:00
|
|
|
from .repository import Repository
|
2016-04-27 22:06:19 +00:00
|
|
|
from .selftest import selftest
|
2016-05-30 23:18:03 +00:00
|
|
|
from .upgrader import AtticRepositoryUpgrader, BorgRepositoryUpgrader
|
2011-10-29 15:01:07 +00:00
|
|
|
|
|
|
|
|
2016-07-30 21:16:19 +00:00
|
|
|
# Column header printed above archive statistics (--stats output);
# NOTE(review): column widths presumably align with Statistics.__str__ -- verify in .archive.
STATS_HEADER = " Original size Compressed size Deduplicated size"
|
|
|
|
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
def argument(args, str_or_bool):
    """If bool is passed, return it. If str is passed, retrieve named attribute from args."""
    if not isinstance(str_or_bool, str):
        # already a concrete value -- hand it back unchanged
        return str_or_bool
    # a string names the args attribute that holds the actual value
    return getattr(args, str_or_bool)
|
2015-11-20 20:01:29 +00:00
|
|
|
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
def with_repository(fake=False, create=False, lock=True, exclusive=False, manifest=True, cache=False):
    """
    Method decorator for subcommand-handling methods: do_XYZ(self, args, repository, …)

    If a parameter (where allowed) is a str the attribute named of args is used instead.

    :param fake: (str or bool) use None instead of repository, don't do anything else
    :param create: create repository
    :param lock: lock repository
    :param exclusive: (str or bool) lock repository exclusively (for writing)
    :param manifest: load manifest and key, pass them as keyword arguments
    :param cache: open cache, pass it as keyword argument (implies manifest)
    """
    def decorator(method):
        @functools.wraps(method)
        def wrapper(self, args, **kwargs):
            location = args.location  # note: 'location' must be always present in args
            append_only = getattr(args, 'append_only', False)
            if argument(args, fake):
                # fake mode: do not touch the repository at all
                return method(self, args, repository=None, **kwargs)
            if location.proto == 'ssh':
                repository = RemoteRepository(location, create=create, exclusive=argument(args, exclusive),
                                              lock_wait=self.lock_wait, lock=lock,
                                              append_only=append_only, args=args)
            else:
                repository = Repository(location.path, create=create, exclusive=argument(args, exclusive),
                                        lock_wait=self.lock_wait, lock=lock,
                                        append_only=append_only)
            with repository:
                if manifest or cache:
                    kwargs['manifest'], kwargs['key'] = Manifest.load(repository)
                if not cache:
                    return method(self, args, repository=repository, **kwargs)
                # cache implies manifest, so 'key' / 'manifest' are present in kwargs here
                with Cache(repository, kwargs['key'], kwargs['manifest'],
                           do_files=getattr(args, 'cache_files', False), lock_wait=self.lock_wait) as cache_:
                    return method(self, args, repository=repository, cache=cache_, **kwargs)
        return wrapper
    return decorator
|
|
|
|
|
|
|
|
|
|
|
|
def with_archive(method):
    """Decorator: open the archive named in args.location and pass it to *method* as ``archive=``."""
    @functools.wraps(method)
    def wrapper(self, args, repository, key, manifest, **kwargs):
        archive = Archive(
            repository, key, manifest, args.location.archive,
            numeric_owner=getattr(args, 'numeric_owner', False),
            cache=kwargs.get('cache'),
            consider_part_files=args.consider_part_files,
        )
        return method(self, args, repository=repository, manifest=manifest, key=key, archive=archive, **kwargs)
    return wrapper
|
2015-11-20 20:01:29 +00:00
|
|
|
|
|
|
|
|
2013-06-26 19:20:31 +00:00
|
|
|
class Archiver:
|
2010-03-15 20:23:34 +00:00
|
|
|
|
2016-05-25 20:01:38 +00:00
|
|
|
def __init__(self, lock_wait=None, prog=None):
    """Initialize the command dispatcher.

    :param lock_wait: seconds to wait for a repository/cache lock; None = default
    :param prog: program name for the argument parser; None lets argparse derive it
    """
    # exit_code holds the worst outcome seen so far; escalated by
    # print_warning() / print_error().
    self.exit_code = EXIT_SUCCESS
    # NOTE(review): lock_wait must be set before build_parser() is called --
    # presumably parser setup (or later wrapper closures) read self.lock_wait.
    self.lock_wait = lock_wait
    self.parser = self.build_parser(prog)
|
2010-10-30 11:44:25 +00:00
|
|
|
|
|
|
|
def print_error(self, msg, *args):
    """Log *msg* as an error and escalate the exit code to EXIT_ERROR.

    :param msg: message, or a %-format template if *args* are given
    :param args: optional %-formatting arguments for *msg*
    """
    # Explicit formatting instead of the old `args and msg % args or msg`
    # and/or-idiom, which silently logged the *unformatted* template whenever
    # the formatted result happened to be falsy (e.g. an empty string).
    if args:
        msg = msg % args
    self.exit_code = EXIT_ERROR
    logger.error(msg)
|
2010-10-30 11:44:25 +00:00
|
|
|
|
2015-10-25 00:35:42 +00:00
|
|
|
def print_warning(self, msg, *args):
    """Log *msg* as a warning and escalate the exit code to EXIT_WARNING.

    :param msg: message, or a %-format template if *args* are given
    :param args: optional %-formatting arguments for *msg*
    """
    # Explicit formatting instead of the old `args and msg % args or msg`
    # and/or-idiom, which silently logged the *unformatted* template whenever
    # the formatted result happened to be falsy (e.g. an empty string).
    if args:
        msg = msg % args
    self.exit_code = EXIT_WARNING  # we do not terminate here, so it is a warning
    logger.warning(msg)
|
2010-10-30 11:44:25 +00:00
|
|
|
|
2015-11-23 17:00:57 +00:00
|
|
|
def print_file_status(self, status, path):
    """Emit one '<status> <path>' line on the borg.output.list logger (--list output)."""
    if not self.output_list:
        return
    # an explicit --filter restricts which status codes are shown
    if self.output_filter is not None and status not in self.output_filter:
        return
    logging.getLogger('borg.output.list').info("%1s %s", status, remove_surrogates(path))
|
2010-10-30 11:44:25 +00:00
|
|
|
|
2016-03-12 11:40:39 +00:00
|
|
|
@staticmethod
|
|
|
|
def compare_chunk_contents(chunks1, chunks2):
|
|
|
|
"""Compare two chunk iterators (like returned by :meth:`.DownloadPipeline.fetch_many`)"""
|
|
|
|
end = object()
|
|
|
|
alen = ai = 0
|
|
|
|
blen = bi = 0
|
|
|
|
while True:
|
|
|
|
if not alen - ai:
|
|
|
|
a = next(chunks1, end)
|
|
|
|
if a is end:
|
|
|
|
return not blen - bi and next(chunks2, end) is end
|
2016-03-18 02:16:12 +00:00
|
|
|
a = memoryview(a.data)
|
2016-03-12 11:40:39 +00:00
|
|
|
alen = len(a)
|
|
|
|
ai = 0
|
|
|
|
if not blen - bi:
|
|
|
|
b = next(chunks2, end)
|
|
|
|
if b is end:
|
|
|
|
return not alen - ai and next(chunks1, end) is end
|
2016-03-18 02:16:12 +00:00
|
|
|
b = memoryview(b.data)
|
2016-03-12 11:40:39 +00:00
|
|
|
blen = len(b)
|
|
|
|
bi = 0
|
|
|
|
slicelen = min(alen - ai, blen - bi)
|
|
|
|
if a[ai:ai + slicelen] != b[bi:bi + slicelen]:
|
|
|
|
return False
|
|
|
|
ai += slicelen
|
|
|
|
bi += slicelen
|
|
|
|
|
|
|
|
@staticmethod
def build_matcher(excludes, paths):
    """Build a PatternMatcher from exclude patterns and positional include paths.

    Returns the matcher plus the list of include patterns (the caller checks
    their match counts afterwards).
    """
    matcher = PatternMatcher()
    if excludes:
        matcher.add(excludes, False)
    include_patterns = [parse_pattern(p, PathPrefixPattern) for p in paths] if paths else []
    matcher.add(include_patterns, True)
    # with no includes given, everything that is not excluded matches
    matcher.fallback = not include_patterns
    return matcher, include_patterns
|
|
|
|
|
2014-03-24 20:28:59 +00:00
|
|
|
def do_serve(self, args):
    """Start in server mode. This command is usually not used manually.
    """
    server = RepositoryServer(restrict_to_paths=args.restrict_to_paths, append_only=args.append_only)
    return server.serve()
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository(create=True, exclusive=True, manifest=False)
def do_init(self, args, repository):
    """Initialize an empty repository"""
    logger.info('Initializing repository at "%s"' % args.location.canonical_path())
    try:
        # create the encryption key; EOFError/KeyboardInterrupt indicate an
        # aborted (presumably interactive) key setup
        key = key_creator(repository, args)
    except (EOFError, KeyboardInterrupt):
        # roll back: remove the repository that @with_repository just created
        repository.destroy()
        return EXIT_WARNING
    manifest = Manifest(key, repository)
    manifest.key = key
    manifest.write()
    repository.commit()
    # open the cache once so its directory/files get initialized now;
    # warn_if_unencrypted=False: no "unencrypted repo" warning during init
    with Cache(repository, key, manifest, warn_if_unencrypted=False):
        pass
    return self.exit_code
|
2011-07-30 19:13:48 +00:00
|
|
|
|
2016-07-23 14:16:56 +00:00
|
|
|
@with_repository(exclusive=True, manifest=False)
def do_check(self, args, repository):
    """Check repository consistency"""
    if args.repair:
        # repair is destructive -- require explicit confirmation
        prompt = ("'check --repair' is an experimental feature that might result in data loss."
                  "\nType 'YES' if you understand this and want to continue: ")
        if not yes(prompt, false_msg="Aborting.", truish=('YES', ),
                   env_var_override='BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'):
            return EXIT_ERROR
    if args.repo_only and args.verify_data:
        self.print_error("--repository-only and --verify-data contradict each other. Please select one.")
        return EXIT_ERROR
    # low-level repository check first, unless restricted to archives
    if not args.archives_only and not repository.check(repair=args.repair, save_space=args.save_space):
        return EXIT_WARNING
    # archive-level (metadata/content) check, unless restricted to the repository
    if not args.repo_only:
        archives_ok = ArchiveChecker().check(
            repository, repair=args.repair, archive=args.location.archive,
            last=args.last, prefix=args.prefix, verify_data=args.verify_data,
            save_space=args.save_space)
        if not archives_ok:
            return EXIT_WARNING
    return EXIT_SUCCESS
|
2014-02-04 22:49:10 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository()
def do_change_passphrase(self, args, repository, manifest, key):
    """Change repository key file passphrase"""
    # key is supplied by @with_repository (manifest=True default);
    # the key class implements the actual passphrase change and re-save.
    key.change_passphrase()
    return EXIT_SUCCESS
|
2011-10-27 20:17:47 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository(manifest=False)
def do_migrate_to_repokey(self, args, repository):
    """Migrate passphrase -> repokey"""
    manifest_data = repository.get(Manifest.MANIFEST_ID)
    key_old = PassphraseKey.detect(repository, manifest_data)
    key_new = RepoKey(repository)
    key_new.target = repository
    key_new.repository_id = repository.id
    # carry the secret key material over from the old passphrase key
    for attr in ('enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'):
        setattr(key_new, attr, getattr(key_old, attr))
    key_new.change_passphrase()  # option to change key protection passphrase, save
    return EXIT_SUCCESS
|
|
|
|
|
2016-07-23 14:16:56 +00:00
|
|
|
@with_repository(fake='dry_run', exclusive=True)
def do_create(self, args, repository, manifest=None, key=None):
    """Create new archive"""
    # exclude patterns only; matcher falls back to "include" for everything else
    matcher = PatternMatcher(fallback=True)
    if args.excludes:
        matcher.add(args.excludes, False)

    def create_inner(archive, cache):
        # archive/cache are None in dry-run mode (fake='dry_run' above).
        # Add cache dir to inode_skip list
        skip_inodes = set()
        try:
            st = os.stat(get_cache_dir())
            skip_inodes.add((st.st_ino, st.st_dev))
        except OSError:
            # cache dir may not exist yet -- nothing to skip then
            pass
        # Add local repository dir to inode_skip list
        if not args.location.host:
            try:
                st = os.stat(args.location.path)
                skip_inodes.add((st.st_ino, st.st_dev))
            except OSError:
                pass
        for path in args.paths:
            if path == '-':  # stdin
                path = 'stdin'
                if not dry_run:
                    try:
                        status = archive.process_stdin(path, cache)
                    except BackupOSError as e:
                        status = 'E'
                        self.print_warning('%s: %s', path, e)
                else:
                    status = '-'
                self.print_file_status(status, path)
                continue
            path = os.path.normpath(path)
            try:
                st = os.lstat(path)
            except OSError as e:
                # unreadable start path: warn and continue with the next one
                self.print_warning('%s: %s', path, e)
                continue
            if args.one_file_system:
                # remember the device of the start path; _process() will not
                # cross into other filesystems then
                restrict_dev = st.st_dev
            else:
                restrict_dev = None
            self._process(archive, cache, matcher, args.exclude_caches, args.exclude_if_present,
                          args.keep_tag_files, skip_inodes, path, restrict_dev,
                          read_special=args.read_special, dry_run=dry_run, st=st)
        if not dry_run:
            archive.save(comment=args.comment, timestamp=args.timestamp)
            if args.progress:
                archive.stats.show_progress(final=True)
            if args.stats:
                archive.end = datetime.utcnow()
                # --stats output goes to its own logger so it can be shown
                # without -v and/or redirected independently
                log_multi(DASHES,
                          str(archive),
                          DASHES,
                          STATS_HEADER,
                          str(archive.stats),
                          str(cache),
                          DASHES, logger=logging.getLogger('borg.output.stats'))

    # stash per-invocation output options on self for print_file_status()/_process()
    self.output_filter = args.output_filter
    self.output_list = args.output_list
    self.ignore_inode = args.ignore_inode
    dry_run = args.dry_run
    t0 = datetime.utcnow()
    if not dry_run:
        with Cache(repository, key, manifest, do_files=args.cache_files, lock_wait=self.lock_wait) as cache:
            archive = Archive(repository, key, manifest, args.location.archive, cache=cache,
                              create=True, checkpoint_interval=args.checkpoint_interval,
                              numeric_owner=args.numeric_owner, progress=args.progress,
                              chunker_params=args.chunker_params, start=t0,
                              compression=args.compression, compression_files=args.compression_files)
            create_inner(archive, cache)
    else:
        create_inner(None, None)
    return self.exit_code
|
2010-02-20 21:28:46 +00:00
|
|
|
|
2016-01-18 15:45:48 +00:00
|
|
|
def _process(self, archive, cache, matcher, exclude_caches, exclude_if_present,
             keep_tag_files, skip_inodes, path, restrict_dev,
             read_special=False, dry_run=False, st=None):
    """Recursively dispatch *path* to the matching archive.process_* method.

    :param matcher: PatternMatcher deciding which paths are backed up
    :param skip_inodes: set of (st_ino, st_dev) never to back up (cache/repo dirs)
    :param restrict_dev: if not None, only recurse within this st_dev (--one-file-system)
    :param read_special: back up the *content* of special files instead of the node
    :param dry_run: if True, only report what would be done
    :param st: optional pre-fetched os.lstat() result for *path*
    """
    if not matcher.match(path):
        self.print_file_status('x', path)
        return
    if st is None:
        try:
            st = os.lstat(path)
        except OSError as e:
            self.print_warning('%s: %s', path, e)
            return
    if (st.st_ino, st.st_dev) in skip_inodes:
        return
    # if restrict_dev is given, we do not want to recurse into a new filesystem,
    # but we WILL save the mountpoint directory (or more precise: the root
    # directory of the mounted filesystem that shadows the mountpoint dir).
    recurse = restrict_dev is None or st.st_dev == restrict_dev
    status = None
    # Ignore if nodump flag is set
    try:
        if get_flags(path, st) & stat.UF_NODUMP:
            self.print_file_status('x', path)
            return
    except OSError as e:
        self.print_warning('%s: %s', path, e)
        return
    if stat.S_ISREG(st.st_mode):
        if not dry_run:
            try:
                status = archive.process_file(path, st, cache, self.ignore_inode)
            except BackupOSError as e:
                status = 'E'
                self.print_warning('%s: %s', path, e)
    elif stat.S_ISDIR(st.st_mode):
        if recurse:
            # tagged dirs (CACHEDIR.TAG / --exclude-if-present) are pruned,
            # optionally keeping the tag files themselves
            tag_paths = dir_is_tagged(path, exclude_caches, exclude_if_present)
            if tag_paths:
                if keep_tag_files and not dry_run:
                    archive.process_dir(path, st)
                for tag_path in tag_paths:
                    self._process(archive, cache, matcher, exclude_caches, exclude_if_present,
                                  keep_tag_files, skip_inodes, tag_path, restrict_dev,
                                  read_special=read_special, dry_run=dry_run)
                return
        if not dry_run:
            status = archive.process_dir(path, st)
        if recurse:
            try:
                entries = helpers.scandir_inorder(path)
            except OSError as e:
                status = 'E'
                self.print_warning('%s: %s', path, e)
            else:
                for dirent in entries:
                    normpath = os.path.normpath(dirent.path)
                    self._process(archive, cache, matcher, exclude_caches, exclude_if_present,
                                  keep_tag_files, skip_inodes, normpath, restrict_dev,
                                  read_special=read_special, dry_run=dry_run)
    elif stat.S_ISLNK(st.st_mode):
        if not dry_run:
            if not read_special:
                status = archive.process_symlink(path, st)
            else:
                # --read-special: follow the link; if it points at a special
                # file, archive that file's content
                st_target = os.stat(path)
                if is_special(st_target.st_mode):
                    status = archive.process_file(path, st_target, cache)
                else:
                    status = archive.process_symlink(path, st)
    elif stat.S_ISFIFO(st.st_mode):
        if not dry_run:
            if not read_special:
                status = archive.process_fifo(path, st)
            else:
                status = archive.process_file(path, st, cache)
    elif stat.S_ISCHR(st.st_mode) or stat.S_ISBLK(st.st_mode):
        if not dry_run:
            if not read_special:
                status = archive.process_dev(path, st)
            else:
                status = archive.process_file(path, st, cache)
    elif stat.S_ISSOCK(st.st_mode):
        # Ignore unix sockets
        return
    elif stat.S_ISDOOR(st.st_mode):
        # Ignore Solaris doors
        return
    elif stat.S_ISPORT(st.st_mode):
        # Ignore Solaris event ports
        return
    else:
        self.print_warning('Unknown file type: %s', path)
        return
    # Status output
    if status is None:
        if not dry_run:
            status = '?'  # need to add a status code somewhere
        else:
            status = '-'  # dry run, item was not backed up
    self.print_file_status(status, path)
|
2010-11-23 11:50:09 +00:00
|
|
|
|
2016-08-17 20:36:25 +00:00
|
|
|
@staticmethod
|
2016-08-19 22:04:55 +00:00
|
|
|
def build_filter(matcher, is_hardlink_master, strip_components=0):
|
2016-08-17 20:36:25 +00:00
|
|
|
if strip_components:
|
|
|
|
def item_filter(item):
|
2016-08-19 22:04:55 +00:00
|
|
|
return (is_hardlink_master(item) or
|
|
|
|
matcher.match(item.path) and os.sep.join(item.path.split(os.sep)[strip_components:]))
|
2016-08-17 20:36:25 +00:00
|
|
|
else:
|
|
|
|
def item_filter(item):
|
2016-08-19 22:04:55 +00:00
|
|
|
return (is_hardlink_master(item) or
|
|
|
|
matcher.match(item.path))
|
2016-08-17 20:36:25 +00:00
|
|
|
return item_filter
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository()
@with_archive
def do_extract(self, args, repository, manifest, key, archive):
    """Extract archive contents"""
    # be restrictive when restoring files, restore permissions later
    if sys.getfilesystemencoding() == 'ascii':
        logger.warning('Warning: File system encoding is "ascii", extracting non-ascii filenames will not be supported.')
        if sys.platform.startswith(('linux', 'freebsd', 'netbsd', 'openbsd', 'darwin', )):
            logger.warning('Hint: You likely need to fix your locale setup. E.g. install locales and use: LANG=en_US.UTF-8')

    matcher, include_patterns = self.build_matcher(args.excludes, args.paths)

    output_list = args.output_list
    dry_run = args.dry_run
    stdout = args.stdout
    sparse = args.sparse
    strip_components = args.strip_components
    # directories whose attribute restore is deferred until their contents
    # are extracted (restore_attrs=False below, finalized at the end)
    dirs = []
    # a partial extract needs hardlink master bookkeeping: a hardlink slave
    # may be extracted while its master is filtered out
    partial_extract = not matcher.empty() or strip_components
    hardlink_masters = {} if partial_extract else None

    def item_is_hardlink_master(item):
        # a regular file that other items may reference via 'source'
        return (partial_extract and stat.S_ISREG(item.mode) and
                item.get('hardlink_master', True) and 'source' not in item)

    filter = self.build_filter(matcher, item_is_hardlink_master, strip_components)
    for item in archive.iter_items(filter, preload=True):
        orig_path = item.path
        if item_is_hardlink_master(item):
            hardlink_masters[orig_path] = (item.get('chunks'), None)
        if not matcher.match(item.path):
            # item passed the filter only as a hardlink master -- record it
            # above, but do not extract it
            continue
        if strip_components:
            item.path = os.sep.join(orig_path.split(os.sep)[strip_components:])
        if not args.dry_run:
            # leaving a directory's subtree: restore its attributes now
            while dirs and not item.path.startswith(dirs[-1].path):
                dir_item = dirs.pop(-1)
                try:
                    archive.extract_item(dir_item, stdout=stdout)
                except BackupOSError as e:
                    self.print_warning('%s: %s', remove_surrogates(dir_item.path), e)
        if output_list:
            # --list output has its own logger so it shows without --info/-v
            logging.getLogger('borg.output.list').info(remove_surrogates(orig_path))
        try:
            if dry_run:
                archive.extract_item(item, dry_run=True)
            else:
                if stat.S_ISDIR(item.mode):
                    dirs.append(item)
                    archive.extract_item(item, restore_attrs=False)
                else:
                    archive.extract_item(item, stdout=stdout, sparse=sparse, hardlink_masters=hardlink_masters,
                                         original_path=orig_path)
        except BackupOSError as e:
            self.print_warning('%s: %s', remove_surrogates(orig_path), e)

    if not args.dry_run:
        # restore attributes of all remaining deferred directories
        while dirs:
            dir_item = dirs.pop(-1)
            try:
                archive.extract_item(dir_item)
            except BackupOSError as e:
                self.print_warning('%s: %s', remove_surrogates(dir_item.path), e)
    for pattern in include_patterns:
        if pattern.match_count == 0:
            self.print_warning("Include pattern '%s' never matched.", pattern)
    return self.exit_code
|
2010-02-24 22:24:19 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
    @with_repository()
    @with_archive
    def do_diff(self, args, repository, manifest, key, archive):
        """Diff contents of two archives.

        Compares ``archive`` (from the location argument) against
        ``args.archive2`` and prints one line per differing path.
        Returns self.exit_code.
        """
        def fetch_and_compare_chunks(chunk_ids1, chunk_ids2, archive1, archive2):
            # Fetch actual chunk data from both archives and compare contents
            # byte-wise; used when chunk ids are not comparable directly.
            chunks1 = archive1.pipeline.fetch_many(chunk_ids1)
            chunks2 = archive2.pipeline.fetch_many(chunk_ids2)
            return self.compare_chunk_contents(chunks1, chunks2)

        def sum_chunk_size(item, consider_ids=None):
            # Total size of the item's chunks; restricted to consider_ids if given.
            # Returns None for deleted placeholder items (they have no real size).
            if item.get('deleted'):
                return None
            else:
                return sum(c.size for c in item.chunks
                           if consider_ids is None or c.id in consider_ids)

        def get_owner(item):
            # Owner as (user, group); numeric ids when --numeric-owner was given.
            if args.numeric_owner:
                return item.uid, item.gid
            else:
                return item.user, item.group

        def get_mode(item):
            # ls-style mode string, e.g. '-rw-r--r--'. Deleted placeholder items
            # have no 'mode' key; return [None] so callers indexing [0] get None.
            if 'mode' in item:
                return stat.filemode(item.mode)
            else:
                return [None]

        def has_hardlink_master(item, hardlink_masters):
            # True if item is a regular-file hardlink slave whose master we have seen.
            return stat.S_ISREG(item.mode) and item.get('source') in hardlink_masters

        def compare_link(item1, item2):
            # These are the simple link cases. For special cases, e.g. if a
            # regular file is replaced with a link or vice versa, it is
            # indicated in compare_mode instead.
            if item1.get('deleted'):
                return 'added link'
            elif item2.get('deleted'):
                return 'removed link'
            elif 'source' in item1 and 'source' in item2 and item1.source != item2.source:
                return 'changed link'

        def contents_changed(item1, item2):
            # Fast path: identical chunker params mean chunk id lists are comparable.
            if can_compare_chunk_ids:
                return item1.chunks != item2.chunks
            else:
                # Different total size is a cheap, definite "changed".
                if sum_chunk_size(item1) != sum_chunk_size(item2):
                    return True
                else:
                    # Same size: must fetch and compare the actual chunk data.
                    chunk_ids1 = [c.id for c in item1.chunks]
                    chunk_ids2 = [c.id for c in item2.chunks]
                    return not fetch_and_compare_chunks(chunk_ids1, chunk_ids2, archive1, archive2)

        def compare_content(path, item1, item2):
            # Describe a content change as added/removed/size-delta text, or None.
            if contents_changed(item1, item2):
                if item1.get('deleted'):
                    return ('added {:>13}'.format(format_file_size(sum_chunk_size(item2))))
                elif item2.get('deleted'):
                    return ('removed {:>11}'.format(format_file_size(sum_chunk_size(item1))))
                else:
                    # Report how much was added and removed, by chunk id set difference.
                    chunk_ids1 = {c.id for c in item1.chunks}
                    chunk_ids2 = {c.id for c in item2.chunks}
                    added_ids = chunk_ids2 - chunk_ids1
                    removed_ids = chunk_ids1 - chunk_ids2
                    added = sum_chunk_size(item2, added_ids)
                    removed = sum_chunk_size(item1, removed_ids)
                    return ('{:>9} {:>9}'.format(format_file_size(added, precision=1, sign=True),
                                                 format_file_size(-removed, precision=1, sign=True)))

        def compare_directory(item1, item2):
            # Directory presence change; None when unchanged.
            if item2.get('deleted') and not item1.get('deleted'):
                return 'removed directory'
            elif item1.get('deleted') and not item2.get('deleted'):
                return 'added directory'

        def compare_owner(item1, item2):
            # Owner change as '[user:group -> user:group]'; None when unchanged.
            user1, group1 = get_owner(item1)
            user2, group2 = get_owner(item2)
            if user1 != user2 or group1 != group2:
                return '[{}:{} -> {}:{}]'.format(user1, group1, user2, group2)

        def compare_mode(item1, item2):
            # Mode change as '[old -> new]'; None when unchanged.
            if item1.mode != item2.mode:
                return '[{} -> {}]'.format(get_mode(item1), get_mode(item2))

        def compare_items(output, path, item1, item2, hardlink_masters, deleted=False):
            """
            Compare two items with identical paths.
            :param deleted: Whether one of the items has been deleted
            """
            changes = []

            # Resolve hardlink slaves to their master item so chunk lists compare.
            if has_hardlink_master(item1, hardlink_masters):
                item1 = hardlink_masters[item1.source][0]
            if has_hardlink_master(item2, hardlink_masters):
                item2 = hardlink_masters[item2.source][1]

            if get_mode(item1)[0] == 'l' or get_mode(item2)[0] == 'l':
                changes.append(compare_link(item1, item2))

            if 'chunks' in item1 and 'chunks' in item2:
                changes.append(compare_content(path, item1, item2))

            if get_mode(item1)[0] == 'd' or get_mode(item2)[0] == 'd':
                changes.append(compare_directory(item1, item2))

            # Owner/mode comparisons are meaningless against a deleted placeholder.
            if not deleted:
                changes.append(compare_owner(item1, item2))
                changes.append(compare_mode(item1, item2))

            # Drop the None entries of unchanged aspects.
            changes = [x for x in changes if x]
            if changes:
                output_line = (remove_surrogates(path), ' '.join(changes))
                # With --sort, collect and emit sorted later; otherwise stream out.
                if args.sort:
                    output.append(output_line)
                else:
                    print_output(output_line)

        def print_output(line):
            # line is (path, change-description); description column first.
            print("{:<19} {}".format(line[1], line[0]))

        def compare_archives(archive1, archive2, matcher):
            def hardlink_master_seen(item):
                # True unless item is a regular-file slave whose master is still unknown.
                return 'source' not in item or not stat.S_ISREG(item.mode) or item.source in hardlink_masters

            def is_hardlink_master(item):
                return item.get('hardlink_master', True) and 'source' not in item

            def update_hardlink_masters(item1, item2):
                if is_hardlink_master(item1) or is_hardlink_master(item2):
                    hardlink_masters[item1.path] = (item1, item2)

            def compare_or_defer(item1, item2):
                update_hardlink_masters(item1, item2)
                # Defer slaves whose master has not been iterated over yet.
                if not hardlink_master_seen(item1) or not hardlink_master_seen(item2):
                    deferred.append((item1, item2))
                else:
                    compare_items(output, item1.path, item1, item2, hardlink_masters)

            # Items seen in only one archive so far, keyed by path, in archive order.
            orphans_archive1 = collections.OrderedDict()
            orphans_archive2 = collections.OrderedDict()
            deferred = []
            hardlink_masters = {}
            output = []

            # Walk both archives in lockstep; paths are matched up via the orphan dicts.
            for item1, item2 in zip_longest(
                    archive1.iter_items(lambda item: matcher.match(item.path)),
                    archive2.iter_items(lambda item: matcher.match(item.path)),
            ):
                if item1 and item2 and item1.path == item2.path:
                    compare_or_defer(item1, item2)
                    continue
                if item1:
                    matching_orphan = orphans_archive2.pop(item1.path, None)
                    if matching_orphan:
                        compare_or_defer(item1, matching_orphan)
                    else:
                        orphans_archive1[item1.path] = item1
                if item2:
                    matching_orphan = orphans_archive1.pop(item2.path, None)
                    if matching_orphan:
                        compare_or_defer(matching_orphan, item2)
                    else:
                        orphans_archive2[item2.path] = item2
            # At this point orphans_* contain items that had no matching partner in the other archive
            # Reusable placeholder representing "item absent from the other archive".
            deleted_item = Item(
                deleted=True,
                chunks=[],
                mode=0,
            )
            for added in orphans_archive2.values():
                path = added.path
                deleted_item.path = path
                update_hardlink_masters(deleted_item, added)
                compare_items(output, path, deleted_item, added, hardlink_masters, deleted=True)
            for deleted in orphans_archive1.values():
                path = deleted.path
                deleted_item.path = path
                update_hardlink_masters(deleted, deleted_item)
                compare_items(output, path, deleted, deleted_item, hardlink_masters, deleted=True)
            # All masters have been seen by now, so deferred slaves can be compared.
            for item1, item2 in deferred:
                assert hardlink_master_seen(item1)
                assert hardlink_master_seen(item2)
                compare_items(output, item1.path, item1, item2, hardlink_masters)

            # Non-empty only with --sort (see compare_items).
            for line in sorted(output):
                print_output(line)

        archive1 = archive
        archive2 = Archive(repository, key, manifest, args.archive2,
                           consider_part_files=args.consider_part_files)

        # Chunk ids are only directly comparable when both archives were cut with
        # the same chunker params (or the user asserts so via --same-chunker-params).
        can_compare_chunk_ids = archive1.metadata.get('chunker_params', False) == archive2.metadata.get(
            'chunker_params', True) or args.same_chunker_params
        if not can_compare_chunk_ids:
            self.print_warning('--chunker-params might be different between archives, diff will be slow.\n'
                               'If you know for certain that they are the same, pass --same-chunker-params '
                               'to override this check.')

        matcher, include_patterns = self.build_matcher(args.excludes, args.paths)

        compare_archives(archive1, archive2, matcher)

        for pattern in include_patterns:
            if pattern.match_count == 0:
                self.print_warning("Include pattern '%s' never matched.", pattern)
        return self.exit_code
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
    @with_repository(exclusive=True, cache=True)
    @with_archive
    def do_rename(self, args, repository, manifest, key, cache, archive):
        """Rename an existing archive to args.name and persist the change.

        Commit order matters: the manifest is written before the repository
        and cache commits so the new name is durable together with the data.
        """
        archive.rename(args.name)
        manifest.write()
        repository.commit()
        cache.commit()
        return self.exit_code
|
|
|
|
|
2016-06-05 00:14:14 +00:00
|
|
|
    @with_repository(exclusive=True, manifest=False)
    def do_delete(self, args, repository):
        """Delete an existing repository or archive.

        With an archive in the location: delete just that archive.
        Without one: interactively confirm, then destroy the whole repository
        (unless --cache-only), and always destroy the local cache for it.
        """
        if args.location.archive:
            # Archive deletion: load manifest ourselves (decorator passed manifest=False).
            manifest, key = Manifest.load(repository)
            with Cache(repository, key, manifest, lock_wait=self.lock_wait) as cache:
                archive = Archive(repository, key, manifest, args.location.archive, cache=cache)
                stats = Statistics()
                archive.delete(stats, progress=args.progress, forced=args.forced)
                manifest.write()
                repository.commit(save_space=args.save_space)
                cache.commit()
                logger.info("Archive deleted.")
                # --stats: emit deletion statistics via the dedicated stats logger.
                if args.stats:
                    log_multi(DASHES,
                              STATS_HEADER,
                              stats.summary.format(label='Deleted data:', stats=stats),
                              str(cache),
                              DASHES, logger=logging.getLogger('borg.output.stats'))
        else:
            if not args.cache_only:
                # Build the confirmation prompt, listing archive names when possible.
                msg = []
                try:
                    manifest, key = Manifest.load(repository)
                except NoManifestError:
                    msg.append("You requested to completely DELETE the repository *including* all archives it may contain.")
                    msg.append("This repository seems to have no manifest, so we can't tell anything about its contents.")
                else:
                    msg.append("You requested to completely DELETE the repository *including* all archives it contains:")
                    for archive_info in manifest.archives.list(sort_by='ts'):
                        msg.append(format_archive(archive_info))
                msg.append("Type 'YES' if you understand this and want to continue: ")
                msg = '\n'.join(msg)
                # Requires literal 'YES' (or the env var override) to proceed.
                if not yes(msg, false_msg="Aborting.", truish=('YES', ),
                           env_var_override='BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'):
                    self.exit_code = EXIT_ERROR
                    return self.exit_code
                repository.destroy()
                logger.info("Repository deleted.")
            # The local cache is removed even with --cache-only.
            Cache.destroy(repository)
            logger.info("Cache deleted.")
        return self.exit_code
|
2010-02-24 22:24:19 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository()
|
|
|
|
def do_mount(self, args, repository, manifest, key):
|
2016-04-17 14:24:26 +00:00
|
|
|
"""Mount archive or an entire repository as a FUSE filesystem"""
|
2013-07-24 11:23:51 +00:00
|
|
|
try:
|
2015-05-22 17:21:41 +00:00
|
|
|
from .fuse import FuseOperations
|
2015-03-18 01:33:34 +00:00
|
|
|
except ImportError as e:
|
2015-10-25 00:35:42 +00:00
|
|
|
self.print_error('Loading fuse support failed [ImportError: %s]' % str(e))
|
2013-07-24 11:23:51 +00:00
|
|
|
return self.exit_code
|
|
|
|
|
|
|
|
if not os.path.isdir(args.mountpoint) or not os.access(args.mountpoint, os.R_OK | os.W_OK | os.X_OK):
|
|
|
|
self.print_error('%s: Mountpoint must be a writable directory' % args.mountpoint)
|
2013-07-24 11:05:47 +00:00
|
|
|
return self.exit_code
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
with cache_if_remote(repository) as cached_repo:
|
|
|
|
if args.location.archive:
|
2016-07-21 20:24:48 +00:00
|
|
|
archive = Archive(repository, key, manifest, args.location.archive,
|
2016-07-21 22:19:56 +00:00
|
|
|
consider_part_files=args.consider_part_files)
|
2016-03-22 23:41:15 +00:00
|
|
|
else:
|
|
|
|
archive = None
|
|
|
|
operations = FuseOperations(key, repository, manifest, archive, cached_repo)
|
|
|
|
logger.info("Mounting filesystem")
|
|
|
|
try:
|
|
|
|
operations.mount(args.mountpoint, args.options, args.foreground)
|
|
|
|
except RuntimeError:
|
|
|
|
# Relevant error message already printed to stderr by fuse
|
|
|
|
self.exit_code = EXIT_ERROR
|
2013-07-24 11:23:51 +00:00
|
|
|
return self.exit_code
|
2013-07-21 22:41:06 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository()
|
|
|
|
def do_list(self, args, repository, manifest, key):
|
2014-04-06 20:47:22 +00:00
|
|
|
"""List archive or repository contents"""
|
2016-06-19 21:02:12 +00:00
|
|
|
if not hasattr(sys.stdout, 'buffer'):
|
|
|
|
# This is a shim for supporting unit tests replacing sys.stdout with e.g. StringIO,
|
|
|
|
# which doesn't have an underlying buffer (= lower file object).
|
|
|
|
def write(bytestring):
|
|
|
|
sys.stdout.write(bytestring.decode('utf-8', errors='replace'))
|
|
|
|
else:
|
|
|
|
write = sys.stdout.buffer.write
|
|
|
|
|
2015-12-12 12:50:24 +00:00
|
|
|
if args.location.archive:
|
2016-03-17 16:32:23 +00:00
|
|
|
matcher, _ = self.build_matcher(args.excludes, args.paths)
|
|
|
|
with Cache(repository, key, manifest, lock_wait=self.lock_wait) as cache:
|
2016-07-21 20:24:48 +00:00
|
|
|
archive = Archive(repository, key, manifest, args.location.archive, cache=cache,
|
2016-07-21 22:19:56 +00:00
|
|
|
consider_part_files=args.consider_part_files)
|
2016-02-05 18:34:48 +00:00
|
|
|
|
2016-08-16 18:45:32 +00:00
|
|
|
if args.format is not None:
|
2016-03-17 16:32:23 +00:00
|
|
|
format = args.format
|
|
|
|
elif args.short:
|
|
|
|
format = "{path}{NL}"
|
|
|
|
else:
|
|
|
|
format = "{mode} {user:6} {group:6} {size:8} {isomtime} {path}{extra}{NL}"
|
|
|
|
formatter = ItemFormatter(archive, format)
|
|
|
|
|
2016-05-31 23:45:45 +00:00
|
|
|
for item in archive.iter_items(lambda item: matcher.match(item.path)):
|
2016-04-23 20:57:04 +00:00
|
|
|
write(safe_encode(formatter.format_item(item)))
|
2010-10-15 18:35:49 +00:00
|
|
|
else:
|
2016-08-16 18:45:32 +00:00
|
|
|
if args.format is not None:
|
2016-06-20 14:02:49 +00:00
|
|
|
format = args.format
|
|
|
|
elif args.short:
|
|
|
|
format = "{archive}{NL}"
|
|
|
|
else:
|
|
|
|
format = "{archive:<36} {time} [{id}]{NL}"
|
|
|
|
formatter = ArchiveFormatter(format)
|
|
|
|
|
2016-08-15 02:17:41 +00:00
|
|
|
for archive_info in manifest.archives.list(sort_by='ts'):
|
2015-11-06 14:45:49 +00:00
|
|
|
if args.prefix and not archive_info.name.startswith(args.prefix):
|
|
|
|
continue
|
2016-06-20 14:02:49 +00:00
|
|
|
write(safe_encode(formatter.format_item(archive_info)))
|
|
|
|
|
2010-10-30 11:44:25 +00:00
|
|
|
return self.exit_code
|
2010-02-24 22:24:19 +00:00
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
    @with_repository(cache=True)
    def do_info(self, args, repository, manifest, key, cache):
        """Show archive details such as disk space used.

        With an archive in the location, prints that archive's metadata and
        statistics; otherwise prints repository/cache statistics only.
        """
        def format_cmdline(cmdline):
            # Re-quote the stored argv list into a shell-safe one-line string.
            return remove_surrogates(' '.join(shlex.quote(x) for x in cmdline))

        if args.location.archive:
            archive = Archive(repository, key, manifest, args.location.archive, cache=cache,
                              consider_part_files=args.consider_part_files)
            stats = archive.calc_stats(cache)
            print('Archive name: %s' % archive.name)
            print('Archive fingerprint: %s' % archive.fpr)
            # 'comment' may be absent in older archives, hence .get with default.
            print('Comment: %s' % archive.metadata.get('comment', ''))
            print('Hostname: %s' % archive.metadata.hostname)
            print('Username: %s' % archive.metadata.username)
            print('Time (start): %s' % format_time(to_localtime(archive.ts)))
            print('Time (end): %s' % format_time(to_localtime(archive.ts_end)))
            print('Duration: %s' % archive.duration_from_meta)
            print('Number of files: %d' % stats.nfiles)
            print('Command line: %s' % format_cmdline(archive.metadata.cmdline))
            print(DASHES)
            print(STATS_HEADER)
            print(str(stats))
            print(str(cache))
        else:
            print(STATS_HEADER)
            print(str(cache))
        return self.exit_code
|
2010-04-18 20:34:21 +00:00
|
|
|
|
2016-07-23 14:16:56 +00:00
|
|
|
    @with_repository(exclusive=True)
    def do_prune(self, args, repository, manifest, key):
        """Prune repository archives according to specified rules.

        Builds the set of archives to keep from the keep-* options and the
        checkpoint rules, then deletes the rest (or only reports, with
        --dry-run).  Returns self.exit_code.
        """
        # At least one retention rule must be given, otherwise everything
        # would be pruned.
        if not any((args.secondly, args.minutely, args.hourly, args.daily,
                    args.weekly, args.monthly, args.yearly, args.within)):
            self.print_error('At least one of the "keep-within", "keep-last", '
                             '"keep-secondly", "keep-minutely", "keep-hourly", "keep-daily", '
                             '"keep-weekly", "keep-monthly" or "keep-yearly" settings must be specified.')
            return self.exit_code
        # Newest first, so index 0 is the most recent archive/checkpoint.
        archives_checkpoints = manifest.archives.list(sort_by='ts', reverse=True)  # just a ArchiveInfo list
        if args.prefix:
            archives_checkpoints = [arch for arch in archives_checkpoints if arch.name.startswith(args.prefix)]
        is_checkpoint = re.compile(r'\.checkpoint(\.\d+)?$').search
        checkpoints = [arch for arch in archives_checkpoints if is_checkpoint(arch.name)]
        # keep the latest checkpoint, if there is no later non-checkpoint archive
        if archives_checkpoints and checkpoints and archives_checkpoints[0] is checkpoints[0]:
            keep_checkpoints = checkpoints[:1]
        else:
            keep_checkpoints = []
        checkpoints = set(checkpoints)
        # ignore all checkpoint archives to avoid keeping one (which is an incomplete backup)
        # that is newer than a successfully completed backup - and killing the successful backup.
        archives = [arch for arch in archives_checkpoints if arch not in checkpoints]
        keep = []
        # Each prune_split call receives the already-kept list so rules do not
        # double-count an archive; order of the rules is significant.
        if args.within:
            keep += prune_within(archives, args.within)
        if args.secondly:
            keep += prune_split(archives, '%Y-%m-%d %H:%M:%S', args.secondly, keep)
        if args.minutely:
            keep += prune_split(archives, '%Y-%m-%d %H:%M', args.minutely, keep)
        if args.hourly:
            keep += prune_split(archives, '%Y-%m-%d %H', args.hourly, keep)
        if args.daily:
            keep += prune_split(archives, '%Y-%m-%d', args.daily, keep)
        if args.weekly:
            # %G-%V is the ISO year/week pair, so weeks spanning New Year work.
            keep += prune_split(archives, '%G-%V', args.weekly, keep)
        if args.monthly:
            keep += prune_split(archives, '%Y-%m', args.monthly, keep)
        if args.yearly:
            keep += prune_split(archives, '%Y', args.yearly, keep)
        to_delete = (set(archives) | checkpoints) - (set(keep) | set(keep_checkpoints))
        stats = Statistics()
        with Cache(repository, key, manifest, do_files=args.cache_files, lock_wait=self.lock_wait) as cache:
            # --list output goes to the dedicated option-specific logger so it
            # can be shown/redirected independently of the root logger level.
            list_logger = logging.getLogger('borg.output.list')
            for archive in archives_checkpoints:
                if archive in to_delete:
                    if args.dry_run:
                        if args.output_list:
                            list_logger.info('Would prune: %s' % format_archive(archive))
                    else:
                        if args.output_list:
                            list_logger.info('Pruning archive: %s' % format_archive(archive))
                        Archive(repository, key, manifest, archive.name, cache).delete(stats, forced=args.forced)
                else:
                    if args.output_list:
                        list_logger.info('Keeping archive: %s' % format_archive(archive))
            # Only commit when something was actually deleted.
            if to_delete and not args.dry_run:
                manifest.write()
                repository.commit(save_space=args.save_space)
                cache.commit()
            if args.stats:
                log_multi(DASHES,
                          STATS_HEADER,
                          stats.summary.format(label='Deleted data:', stats=stats),
                          str(cache),
                          DASHES, logger=logging.getLogger('borg.output.stats'))
        return self.exit_code
|
|
|
|
|
2015-10-03 16:36:52 +00:00
|
|
|
    def do_upgrade(self, args):
        """upgrade a repository from a previous version"""
        # mainly for upgrades from Attic repositories,
        # but also supports borg 0.xx -> 1.0 upgrade.

        # First pass: treat the repo as an Attic repository and convert it.
        # NOTE(review): NotImplementedError appears to be how an upgrader
        # signals "nothing for me to do here" — confirm in the upgrader classes.
        # Either way it is only warned about, so the second pass still runs.
        repo = AtticRepositoryUpgrader(args.location.path, create=False)
        try:
            repo.upgrade(args.dry_run, inplace=args.inplace, progress=args.progress)
        except NotImplementedError as e:
            print("warning: %s" % e)
        # Second pass: apply borg 0.xx -> 1.0 upgrade steps on the same path.
        repo = BorgRepositoryUpgrader(args.location.path, create=False)
        try:
            repo.upgrade(args.dry_run, inplace=args.inplace, progress=args.progress)
        except NotImplementedError as e:
            print("warning: %s" % e)
        return self.exit_code
|
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
    @with_repository(cache=True, exclusive=True)
    def do_recreate(self, args, repository, manifest, key, cache):
        """Re-create archives"""
        def interrupt(signal_num, stack_frame):
            # Closure over ``recreater``: it is bound further down, before the
            # handlers are installed, so it is always set when this can fire.
            # First signal requests a clean stop; repeats are just acknowledged.
            if recreater.interrupt:
                print("\nReceived signal, again. I'm not deaf.", file=sys.stderr)
            else:
                print("\nReceived signal, will exit cleanly.", file=sys.stderr)
                recreater.interrupt = True

        # recreate is experimental: require explicit confirmation (the env var
        # allows scripted/non-interactive use).
        msg = ("recreate is an experimental feature.\n"
               "Type 'YES' if you understand this and want to continue: ")
        if not yes(msg, false_msg="Aborting.", truish=('YES',),
                   env_var_override='BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING'):
            return EXIT_ERROR

        matcher, include_patterns = self.build_matcher(args.excludes, args.paths)
        self.output_list = args.output_list
        self.output_filter = args.output_filter

        recreater = ArchiveRecreater(repository, manifest, key, cache, matcher,
                                     exclude_caches=args.exclude_caches, exclude_if_present=args.exclude_if_present,
                                     keep_tag_files=args.keep_tag_files, chunker_params=args.chunker_params,
                                     compression=args.compression, compression_files=args.compression_files,
                                     always_recompress=args.always_recompress,
                                     progress=args.progress, stats=args.stats,
                                     file_status_printer=self.print_file_status,
                                     dry_run=args.dry_run)

        # Install the handlers only for the duration of the recreate work, so
        # SIGTERM/SIGINT lead to a clean stop instead of an abort mid-operation.
        with signal_handler(signal.SIGTERM, interrupt), \
             signal_handler(signal.SIGINT, interrupt):
            if args.location.archive:
                # Single-archive mode; an explicit --target name is allowed here.
                name = args.location.archive
                if recreater.is_temporary_archive(name):
                    self.print_error('Refusing to work on temporary archive of prior recreate: %s', name)
                    return self.exit_code
                recreater.recreate(name, args.comment, args.target)
            else:
                # Whole-repository mode; --target only makes sense for one archive.
                if args.target is not None:
                    self.print_error('--target: Need to specify single archive')
                    return self.exit_code
                for archive in manifest.archives.list(sort_by='ts'):
                    name = archive.name
                    if recreater.is_temporary_archive(name):
                        # leftover of an earlier interrupted recreate - skip it
                        continue
                    print('Processing', name)
                    # NOTE(review): a falsy return seems to signal interruption —
                    # confirm against ArchiveRecreater.recreate().
                    if not recreater.recreate(name, args.comment):
                        break
            # Persist manifest, repository and cache state only after the work
            # (or a clean interrupt) — order matters here.
            manifest.write()
            repository.commit()
            cache.commit()
            return self.exit_code
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-08-05 23:24:22 +00:00
|
|
|
    @with_repository(manifest=False, exclusive=True)
    def do_with_lock(self, args, repository):
        """run a user specified command with the repository lock held"""
        # for a new server, this will immediately take an exclusive lock.
        # to support old servers, that do not have "exclusive" arg in open()
        # RPC API, we also do it the old way:
        # re-write manifest to start a repository transaction - this causes a
        # lock upgrade to exclusive for remote (and also for local) repositories.
        # by using manifest=False in the decorator, we avoid having to require
        # the encryption key (and can operate just with encrypted data).
        data = repository.get(Manifest.MANIFEST_ID)
        repository.put(Manifest.MANIFEST_ID, data)
        try:
            # we exit with the return code we get from the subprocess
            # (argument list form, so no shell interpretation of user input)
            return subprocess.call([args.command] + args.args)
        finally:
            # abort the dummy transaction started above; nothing was changed
            repository.rollback()
|
|
|
|
|
2016-08-15 17:20:51 +00:00
|
|
|
def do_debug_info(self, args):
|
|
|
|
"""display system information for debugging / bug reports"""
|
|
|
|
print(sysinfo())
|
|
|
|
return EXIT_SUCCESS
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository()
|
|
|
|
def do_debug_dump_archive_items(self, args, repository, manifest, key):
|
2015-11-03 21:51:59 +00:00
|
|
|
"""dump (decrypted, decompressed) archive items metadata (not: data)"""
|
2016-07-21 20:24:48 +00:00
|
|
|
archive = Archive(repository, key, manifest, args.location.archive,
|
2016-07-21 22:19:56 +00:00
|
|
|
consider_part_files=args.consider_part_files)
|
2016-08-14 23:11:33 +00:00
|
|
|
for i, item_id in enumerate(archive.metadata.items):
|
2016-03-18 02:16:12 +00:00
|
|
|
_, data = key.decrypt(item_id, repository.get(item_id))
|
2016-04-23 20:42:56 +00:00
|
|
|
filename = '%06d_%s.items' % (i, bin_to_hex(item_id))
|
2015-11-03 21:51:59 +00:00
|
|
|
print('Dumping', filename)
|
|
|
|
with open(filename, 'wb') as fd:
|
|
|
|
fd.write(data)
|
|
|
|
print('Done.')
|
|
|
|
return EXIT_SUCCESS
|
|
|
|
|
2016-08-04 12:45:53 +00:00
|
|
|
@with_repository()
|
|
|
|
def do_debug_dump_repo_objs(self, args, repository, manifest, key):
|
|
|
|
"""dump (decrypted, decompressed) repo objects"""
|
|
|
|
marker = None
|
|
|
|
i = 0
|
|
|
|
while True:
|
|
|
|
result = repository.list(limit=10000, marker=marker)
|
|
|
|
if not result:
|
|
|
|
break
|
|
|
|
marker = result[-1]
|
|
|
|
for id in result:
|
|
|
|
cdata = repository.get(id)
|
|
|
|
give_id = id if id != Manifest.MANIFEST_ID else None
|
2016-08-05 23:24:22 +00:00
|
|
|
_, data = key.decrypt(give_id, cdata)
|
|
|
|
filename = '%06d_%s.obj' % (i, bin_to_hex(id))
|
2016-08-04 12:45:53 +00:00
|
|
|
print('Dumping', filename)
|
|
|
|
with open(filename, 'wb') as fd:
|
|
|
|
fd.write(data)
|
|
|
|
i += 1
|
|
|
|
print('Done.')
|
|
|
|
return EXIT_SUCCESS
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
@with_repository(manifest=False)
|
|
|
|
def do_debug_get_obj(self, args, repository):
|
2015-11-06 16:45:30 +00:00
|
|
|
"""get object contents from the repository and write it into file"""
|
|
|
|
hex_id = args.id
|
|
|
|
try:
|
|
|
|
id = unhexlify(hex_id)
|
|
|
|
except ValueError:
|
|
|
|
print("object id %s is invalid." % hex_id)
|
|
|
|
else:
|
|
|
|
try:
|
2016-01-30 20:32:45 +00:00
|
|
|
data = repository.get(id)
|
2015-11-06 16:45:30 +00:00
|
|
|
except repository.ObjectNotFound:
|
|
|
|
print("object %s not found." % hex_id)
|
|
|
|
else:
|
|
|
|
with open(args.path, "wb") as f:
|
|
|
|
f.write(data)
|
|
|
|
print("object %s fetched." % hex_id)
|
|
|
|
return EXIT_SUCCESS
|
|
|
|
|
2016-07-23 14:16:56 +00:00
|
|
|
@with_repository(manifest=False, exclusive=True)
|
2016-03-22 23:41:15 +00:00
|
|
|
def do_debug_put_obj(self, args, repository):
|
2015-11-06 16:31:05 +00:00
|
|
|
"""put file(s) contents into the repository"""
|
|
|
|
for path in args.paths:
|
|
|
|
with open(path, "rb") as f:
|
|
|
|
data = f.read()
|
2016-03-12 11:40:39 +00:00
|
|
|
h = hashlib.sha256(data) # XXX hardcoded
|
2015-11-06 16:31:05 +00:00
|
|
|
repository.put(h.digest(), data)
|
|
|
|
print("object %s put." % h.hexdigest())
|
|
|
|
repository.commit()
|
|
|
|
return EXIT_SUCCESS
|
|
|
|
|
2016-07-23 14:16:56 +00:00
|
|
|
@with_repository(manifest=False, exclusive=True)
|
2016-03-22 23:41:15 +00:00
|
|
|
def do_debug_delete_obj(self, args, repository):
|
2015-11-04 00:05:21 +00:00
|
|
|
"""delete the objects with the given IDs from the repo"""
|
|
|
|
modified = False
|
|
|
|
for hex_id in args.ids:
|
|
|
|
try:
|
|
|
|
id = unhexlify(hex_id)
|
|
|
|
except ValueError:
|
|
|
|
print("object id %s is invalid." % hex_id)
|
|
|
|
else:
|
|
|
|
try:
|
|
|
|
repository.delete(id)
|
|
|
|
modified = True
|
|
|
|
print("object %s deleted." % hex_id)
|
|
|
|
except repository.ObjectNotFound:
|
|
|
|
print("object %s not found." % hex_id)
|
|
|
|
if modified:
|
|
|
|
repository.commit()
|
|
|
|
print('Done.')
|
|
|
|
return EXIT_SUCCESS
|
|
|
|
|
2016-03-22 23:41:15 +00:00
|
|
|
    @with_repository(lock=False, manifest=False)
    def do_break_lock(self, args, repository):
        """Break the repository lock (e.g. in case it was left by a dead borg)."""
        # break both the repository lock and the cache lock for this repository
        repository.break_lock()
        Cache.break_lock(repository)
        return self.exit_code
|
|
|
|
|
2016-07-18 10:14:48 +00:00
|
|
|
    # Long-form help topics, shown via "borg help <topic>" (see do_help below).
    # Ordered so topics list in a stable, intentional order.
    helptext = collections.OrderedDict()
    helptext['patterns'] = textwrap.dedent('''
        Exclusion patterns support four separate styles, fnmatch, shell, regular
        expressions and path prefixes. By default, fnmatch is used. If followed
        by a colon (':') the first two characters of a pattern are used as a
        style selector. Explicit style selection is necessary when a
        non-default style is desired or when the desired pattern starts with
        two alphanumeric characters followed by a colon (i.e. `aa:something/*`).

        `Fnmatch <https://docs.python.org/3/library/fnmatch.html>`_, selector `fm:`

            This is the default style. These patterns use a variant of shell
            pattern syntax, with '*' matching any number of characters, '?'
            matching any single character, '[...]' matching any single
            character specified, including ranges, and '[!...]' matching any
            character not specified. For the purpose of these patterns, the
            path separator ('\\' for Windows and '/' on other systems) is not
            treated specially. Wrap meta-characters in brackets for a literal
            match (i.e. `[?]` to match the literal character `?`). For a path
            to match a pattern, it must completely match from start to end, or
            must match from the start to just before a path separator. Except
            for the root path, paths will never end in the path separator when
            matching is attempted. Thus, if a given pattern ends in a path
            separator, a '*' is appended before matching is attempted.

        Shell-style patterns, selector `sh:`

            Like fnmatch patterns these are similar to shell patterns. The difference
            is that the pattern may include `**/` for matching zero or more directory
            levels, `*` for matching zero or more arbitrary characters with the
            exception of any path separator.

        Regular expressions, selector `re:`

            Regular expressions similar to those found in Perl are supported. Unlike
            shell patterns regular expressions are not required to match the complete
            path and any substring match is sufficient. It is strongly recommended to
            anchor patterns to the start ('^'), to the end ('$') or both. Path
            separators ('\\' for Windows and '/' on other systems) in paths are
            always normalized to a forward slash ('/') before applying a pattern. The
            regular expression syntax is described in the `Python documentation for
            the re module <https://docs.python.org/3/library/re.html>`_.

        Prefix path, selector `pp:`

            This pattern style is useful to match whole sub-directories. The pattern
            `pp:/data/bar` matches `/data/bar` and everything therein.

        Exclusions can be passed via the command line option `--exclude`. When used
        from within a shell the patterns should be quoted to protect them from
        expansion.

        The `--exclude-from` option permits loading exclusion patterns from a text
        file with one pattern per line. Lines empty or starting with the number sign
        ('#') after removing whitespace on both ends are ignored. The optional style
        selector prefix is also supported for patterns loaded from a file. Due to
        whitespace removal paths with whitespace at the beginning or end can only be
        excluded using regular expressions.

        Examples::

            # Exclude '/home/user/file.o' but not '/home/user/file.odt':
            $ borg create -e '*.o' backup /

            # Exclude '/home/user/junk' and '/home/user/subdir/junk' but
            # not '/home/user/importantjunk' or '/etc/junk':
            $ borg create -e '/home/*/junk' backup /

            # Exclude the contents of '/home/user/cache' but not the directory itself:
            $ borg create -e /home/user/cache/ backup /

            # The file '/home/user/cache/important' is *not* backed up:
            $ borg create -e /home/user/cache/ backup / /home/user/cache/important

            # The contents of directories in '/home' are not backed up when their name
            # ends in '.tmp'
            $ borg create --exclude 're:^/home/[^/]+\.tmp/' backup /

            # Load exclusions from file
            $ cat >exclude.txt <<EOF
            # Comment line
            /home/*/junk
            *.tmp
            fm:aa:something/*
            re:^/home/[^/]\.tmp/
            sh:/home/*/.thumbnails
            EOF
            $ borg create --exclude-from exclude.txt backup /\n\n''')
    # Placeholder expansion applies to repo/archive names and some option values.
    helptext['placeholders'] = textwrap.dedent('''
        Repository (or Archive) URLs, --prefix and --remote-path values support these
        placeholders:

        {hostname}

            The (short) hostname of the machine.

        {fqdn}

            The full name of the machine.

        {now}

            The current local date and time.

        {utcnow}

            The current UTC date and time.

        {user}

            The user name (or UID, if no name is available) of the user running borg.

        {pid}

            The current process ID.

        {borgversion}

            The version of borg.

        Examples::

            borg create /path/to/repo::{hostname}-{user}-{utcnow} ...
            borg create /path/to/repo::{hostname}-{now:%Y-%m-%d_%H:%M:%S} ...
            borg prune --prefix '{hostname}-' ...\n\n''')
|
2014-02-08 14:44:31 +00:00
|
|
|
|
2014-03-21 21:12:15 +00:00
|
|
|
def do_help(self, parser, commands, args):
|
|
|
|
if not args.topic:
|
|
|
|
parser.print_help()
|
|
|
|
elif args.topic in self.helptext:
|
2015-10-02 15:13:01 +00:00
|
|
|
print(self.helptext[args.topic])
|
2014-03-21 21:12:15 +00:00
|
|
|
elif args.topic in commands:
|
2014-04-06 13:16:25 +00:00
|
|
|
if args.epilog_only:
|
2015-10-02 15:13:01 +00:00
|
|
|
print(commands[args.topic].epilog)
|
2014-04-06 13:16:25 +00:00
|
|
|
elif args.usage_only:
|
|
|
|
commands[args.topic].epilog = None
|
|
|
|
commands[args.topic].print_help()
|
|
|
|
else:
|
|
|
|
commands[args.topic].print_help()
|
2014-02-08 14:44:31 +00:00
|
|
|
else:
|
2014-03-21 21:12:15 +00:00
|
|
|
parser.error('No help available on %s' % (args.topic,))
|
2014-02-08 16:51:44 +00:00
|
|
|
return self.exit_code
|
2014-02-08 14:44:31 +00:00
|
|
|
|
2014-02-19 21:46:15 +00:00
|
|
|
def preprocess_args(self, args):
|
|
|
|
deprecations = [
|
2016-01-30 20:32:45 +00:00
|
|
|
# ('--old', '--new', 'Warning: "--old" has been deprecated. Use "--new" instead.'),
|
2016-03-17 16:32:23 +00:00
|
|
|
('--list-format', '--format', 'Warning: "--list-format" has been deprecated. Use "--format" instead.'),
|
2014-02-19 21:46:15 +00:00
|
|
|
]
|
|
|
|
for i, arg in enumerate(args[:]):
|
|
|
|
for old_name, new_name, warning in deprecations:
|
|
|
|
if arg.startswith(old_name):
|
|
|
|
args[i] = arg.replace(old_name, new_name)
|
2016-08-16 18:46:54 +00:00
|
|
|
print(warning, file=sys.stderr)
|
2014-02-19 21:46:15 +00:00
|
|
|
return args
|
|
|
|
|
2016-05-25 20:01:38 +00:00
|
|
|
def build_parser(self, prog=None):
|
2015-10-08 01:07:12 +00:00
|
|
|
common_parser = argparse.ArgumentParser(add_help=False, prog=prog)
|
2016-04-09 23:28:18 +00:00
|
|
|
|
|
|
|
common_group = common_parser.add_argument_group('Common options')
|
|
|
|
common_group.add_argument('-h', '--help', action='help', help='show this help message and exit')
|
|
|
|
common_group.add_argument('--critical', dest='log_level',
|
|
|
|
action='store_const', const='critical', default='warning',
|
|
|
|
help='work on log level CRITICAL')
|
|
|
|
common_group.add_argument('--error', dest='log_level',
|
|
|
|
action='store_const', const='error', default='warning',
|
|
|
|
help='work on log level ERROR')
|
|
|
|
common_group.add_argument('--warning', dest='log_level',
|
|
|
|
action='store_const', const='warning', default='warning',
|
|
|
|
help='work on log level WARNING (default)')
|
|
|
|
common_group.add_argument('--info', '-v', '--verbose', dest='log_level',
|
|
|
|
action='store_const', const='info', default='warning',
|
|
|
|
help='work on log level INFO')
|
|
|
|
common_group.add_argument('--debug', dest='log_level',
|
|
|
|
action='store_const', const='debug', default='warning',
|
|
|
|
help='enable debug output, work on log level DEBUG')
|
2016-08-06 20:37:44 +00:00
|
|
|
common_group.add_argument('--debug-topic', dest='debug_topics',
|
|
|
|
action='append', metavar='TOPIC', default=[],
|
|
|
|
help='enable TOPIC debugging (can be specified multiple times). '
|
|
|
|
'The logger path is borg.debug.<TOPIC> if TOPIC is not fully qualified.')
|
2016-04-09 23:28:18 +00:00
|
|
|
common_group.add_argument('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
|
|
|
|
help='wait for the lock, but max. N seconds (default: %(default)d).')
|
|
|
|
common_group.add_argument('--show-version', dest='show_version', action='store_true', default=False,
|
|
|
|
help='show/log the borg version')
|
|
|
|
common_group.add_argument('--show-rc', dest='show_rc', action='store_true', default=False,
|
|
|
|
help='show/log the return code (rc)')
|
|
|
|
common_group.add_argument('--no-files-cache', dest='cache_files', action='store_false',
|
|
|
|
help='do not load/update the file metadata cache used to detect unchanged files')
|
|
|
|
common_group.add_argument('--umask', dest='umask', type=lambda s: int(s, 8), default=UMASK_DEFAULT, metavar='M',
|
|
|
|
help='set umask to M (local and remote, default: %(default)04o)')
|
2016-07-05 23:33:53 +00:00
|
|
|
common_group.add_argument('--remote-path', dest='remote_path', metavar='PATH',
|
|
|
|
help='set remote path to executable (default: "borg")')
|
2016-07-21 22:19:56 +00:00
|
|
|
common_group.add_argument('--consider-part-files', dest='consider_part_files',
|
2016-07-21 20:24:48 +00:00
|
|
|
action='store_true', default=False,
|
2016-07-21 22:19:56 +00:00
|
|
|
help='treat part files like normal files (e.g. to list/extract them)')
|
2010-10-15 18:35:49 +00:00
|
|
|
|
2015-10-16 15:40:22 +00:00
|
|
|
parser = argparse.ArgumentParser(prog=prog, description='Borg - Deduplicated Backups')
|
|
|
|
parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__,
|
2016-04-09 23:28:18 +00:00
|
|
|
help='show version number and exit')
|
2016-01-30 20:32:45 +00:00
|
|
|
subparsers = parser.add_subparsers(title='required arguments', metavar='<command>')
|
2010-10-23 21:01:12 +00:00
|
|
|
|
2015-06-11 20:18:12 +00:00
|
|
|
serve_epilog = textwrap.dedent("""
|
|
|
|
This command starts a repository server process. This command is usually not used manually.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('serve', parents=[common_parser], add_help=False,
|
2015-06-11 20:18:12 +00:00
|
|
|
description=self.do_serve.__doc__, epilog=serve_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='start repository server process')
|
2014-03-24 20:28:59 +00:00
|
|
|
subparser.set_defaults(func=self.do_serve)
|
|
|
|
subparser.add_argument('--restrict-to-path', dest='restrict_to_paths', action='append',
|
|
|
|
metavar='PATH', help='restrict repository access to PATH')
|
2016-06-30 15:59:12 +00:00
|
|
|
subparser.add_argument('--append-only', dest='append_only', action='store_true',
|
|
|
|
help='only allow appending to repository segment files')
|
2014-04-06 13:16:25 +00:00
|
|
|
init_epilog = textwrap.dedent("""
|
|
|
|
This command initializes an empty repository. A repository is a filesystem
|
|
|
|
directory containing the deduplicated data from zero or more archives.
|
2016-05-16 23:00:46 +00:00
|
|
|
|
|
|
|
Encryption can be enabled at repository init time (the default).
|
|
|
|
|
|
|
|
It is not recommended to disable encryption. Repository encryption protects you
|
|
|
|
e.g. against the case that an attacker has access to your backup repository.
|
|
|
|
|
|
|
|
But be careful with the key / the passphrase:
|
|
|
|
|
|
|
|
If you want "passphrase-only" security, use the repokey mode. The key will
|
|
|
|
be stored inside the repository (in its "config" file). In above mentioned
|
|
|
|
attack scenario, the attacker will have the key (but not the passphrase).
|
|
|
|
|
|
|
|
If you want "passphrase and having-the-key" security, use the keyfile mode.
|
|
|
|
The key will be stored in your home directory (in .config/borg/keys). In
|
|
|
|
the attack scenario, the attacker who has just access to your repo won't have
|
|
|
|
the key (and also not the passphrase).
|
|
|
|
|
|
|
|
Make a backup copy of the key file (keyfile mode) or repo config file
|
|
|
|
(repokey mode) and keep it at a safe place, so you still have the key in
|
|
|
|
case it gets corrupted or lost. Also keep the passphrase at a safe place.
|
|
|
|
The backup that is encrypted with that key won't help you with that, of course.
|
|
|
|
|
|
|
|
Make sure you use a good passphrase. Not too short, not too simple. The real
|
|
|
|
encryption / decryption key is encrypted with / locked by your passphrase.
|
|
|
|
If an attacker gets your key, he can't unlock and use it without knowing the
|
|
|
|
passphrase.
|
|
|
|
|
|
|
|
Be careful with special or non-ascii characters in your passphrase:
|
|
|
|
|
|
|
|
- Borg processes the passphrase as unicode (and encodes it as utf-8),
|
|
|
|
so it does not have problems dealing with even the strangest characters.
|
|
|
|
- BUT: that does not necessarily apply to your OS / VM / keyboard configuration.
|
|
|
|
|
|
|
|
So better use a long passphrase made from simple ascii chars than one that
|
|
|
|
includes non-ascii stuff or characters that are hard/impossible to enter on
|
|
|
|
a different keyboard layout.
|
|
|
|
|
|
|
|
You can change your passphrase for existing repos at any time, it won't affect
|
|
|
|
the encryption/decryption key or other secrets.
|
|
|
|
|
|
|
|
When encrypting, AES-CTR-256 is used for encryption, and HMAC-SHA256 for
|
|
|
|
authentication. Hardware acceleration will be used automatically.
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('init', parents=[common_parser], add_help=False,
|
2014-04-06 13:16:25 +00:00
|
|
|
description=self.do_init.__doc__, epilog=init_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='initialize empty repository')
|
2011-07-30 19:13:48 +00:00
|
|
|
subparser.set_defaults(func=self.do_init)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2011-07-30 19:13:48 +00:00
|
|
|
type=location_validator(archive=False),
|
2013-07-05 10:32:56 +00:00
|
|
|
help='repository to create')
|
2013-08-10 11:02:20 +00:00
|
|
|
subparser.add_argument('-e', '--encryption', dest='encryption',
|
2016-01-15 05:58:41 +00:00
|
|
|
choices=('none', 'keyfile', 'repokey'), default='repokey',
|
2015-12-19 13:30:05 +00:00
|
|
|
help='select encryption key mode (default: "%(default)s")')
|
2016-07-22 17:58:53 +00:00
|
|
|
subparser.add_argument('-a', '--append-only', dest='append_only', action='store_true',
|
|
|
|
help='create an append-only mode repository')
|
2011-07-30 19:13:48 +00:00
|
|
|
|
2014-02-26 22:13:48 +00:00
|
|
|
check_epilog = textwrap.dedent("""
|
2015-08-08 22:36:17 +00:00
|
|
|
The check command verifies the consistency of a repository and the corresponding archives.
|
|
|
|
|
|
|
|
First, the underlying repository data files are checked:
|
2015-08-29 02:00:22 +00:00
|
|
|
|
2015-08-08 22:36:17 +00:00
|
|
|
- For all segments the segment magic (header) is checked
|
|
|
|
- For all objects stored in the segments, all metadata (e.g. crc and size) and
|
|
|
|
all data is read. The read data is checked by size and CRC. Bit rot and other
|
|
|
|
types of accidental damage can be detected this way.
|
|
|
|
- If we are in repair mode and a integrity error is detected for a segment,
|
|
|
|
we try to recover as many objects from the segment as possible.
|
|
|
|
- In repair mode, it makes sure that the index is consistent with the data
|
|
|
|
stored in the segments.
|
|
|
|
- If you use a remote repo server via ssh:, the repo check is executed on the
|
|
|
|
repo server without causing significant network traffic.
|
|
|
|
- The repository check can be skipped using the --archives-only option.
|
|
|
|
|
|
|
|
Second, the consistency and correctness of the archive metadata is verified:
|
2015-08-29 02:00:22 +00:00
|
|
|
|
2015-08-08 22:36:17 +00:00
|
|
|
- Is the repo manifest present? If not, it is rebuilt from archive metadata
|
2015-08-09 10:52:39 +00:00
|
|
|
chunks (this requires reading and decrypting of all metadata and data).
|
2015-08-08 22:36:17 +00:00
|
|
|
- Check if archive metadata chunk is present. if not, remove archive from
|
|
|
|
manifest.
|
|
|
|
- For all files (items) in the archive, for all chunks referenced by these
|
2016-07-09 18:58:02 +00:00
|
|
|
files, check if chunk is present.
|
|
|
|
If a chunk is not present and we are in repair mode, replace it with a same-size
|
|
|
|
replacement chunk of zeros.
|
|
|
|
If a previously lost chunk reappears (e.g. via a later backup) and we are in
|
|
|
|
repair mode, the all-zero replacement chunk will be replaced by the correct chunk.
|
|
|
|
This requires reading of archive and file metadata, but not data.
|
2015-08-08 22:36:17 +00:00
|
|
|
- If we are in repair mode and we checked all the archives: delete orphaned
|
2015-08-09 10:52:39 +00:00
|
|
|
chunks from the repo.
|
2015-08-08 22:36:17 +00:00
|
|
|
- if you use a remote repo server via ssh:, the archive check is executed on
|
|
|
|
the client machine (because if encryption is enabled, the checks will require
|
|
|
|
decryption and this is always done client-side, because key access will be
|
2015-08-09 10:52:39 +00:00
|
|
|
required).
|
2015-08-08 22:36:17 +00:00
|
|
|
- The archive checks can be time consuming, they can be skipped using the
|
|
|
|
--repository-only option.
|
2016-05-13 20:50:34 +00:00
|
|
|
|
|
|
|
The --verify-data option will perform a full integrity verification (as opposed to
|
|
|
|
checking the CRC32 of the segment) of data, which means reading the data from the
|
|
|
|
repository, decrypting and decompressing it. This is a cryptographic verification,
|
|
|
|
which will detect (accidental) corruption. For encrypted repositories it is
|
|
|
|
tamper-resistant as well, unless the attacker has access to the keys.
|
|
|
|
|
|
|
|
It is also very slow.
|
2014-02-26 22:13:48 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('check', parents=[common_parser], add_help=False,
|
2014-02-04 22:49:10 +00:00
|
|
|
description=self.do_check.__doc__,
|
2014-02-26 22:13:48 +00:00
|
|
|
epilog=check_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='verify repository')
|
2014-02-04 22:49:10 +00:00
|
|
|
subparser.set_defaults(func=self.do_check)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
|
2015-08-08 20:11:40 +00:00
|
|
|
type=location_validator(),
|
|
|
|
help='repository or archive to check consistency of')
|
2014-03-04 20:56:37 +00:00
|
|
|
subparser.add_argument('--repository-only', dest='repo_only', action='store_true',
|
|
|
|
default=False,
|
|
|
|
help='only perform repository checks')
|
|
|
|
subparser.add_argument('--archives-only', dest='archives_only', action='store_true',
|
|
|
|
default=False,
|
|
|
|
help='only perform archives checks')
|
2016-05-13 20:50:34 +00:00
|
|
|
subparser.add_argument('--verify-data', dest='verify_data', action='store_true',
|
|
|
|
default=False,
|
|
|
|
help='perform cryptographic archive data integrity verification '
|
|
|
|
'(conflicts with --repository-only)')
|
2014-02-08 23:17:32 +00:00
|
|
|
subparser.add_argument('--repair', dest='repair', action='store_true',
|
|
|
|
default=False,
|
2014-02-26 22:13:48 +00:00
|
|
|
help='attempt to repair any inconsistencies found')
|
2015-11-18 01:27:25 +00:00
|
|
|
subparser.add_argument('--save-space', dest='save_space', action='store_true',
|
|
|
|
default=False,
|
|
|
|
help='work slower, but using less space')
|
2015-03-11 02:04:12 +00:00
|
|
|
subparser.add_argument('--last', dest='last',
|
|
|
|
type=int, default=None, metavar='N',
|
|
|
|
help='only check last N archives (Default: all)')
|
2016-06-21 21:36:30 +00:00
|
|
|
subparser.add_argument('-P', '--prefix', dest='prefix', type=PrefixSpec,
|
2015-12-12 23:39:15 +00:00
|
|
|
help='only consider archive names starting with this prefix')
|
Print implied output without --info/-v
There are persistent questions why output from options like --list
and --stats doesn't show up. Also, borg currently isn't able to
show *just* the output for a given option (--list, --stats,
--show-rc, --show-version, or --progress), without other INFO level
messages.
The solution is to use more granular loggers, so that messages
specific to a given option goes to a logger designated for that
option. That option-specific logger can then be configured
separately from the regular loggers.
Those option-specific loggers can also be used as a hook in a
BORG_LOGGING_CONF config file to log the --list output to a separate
file, or send --stats output to a network socket where some daemon
could analyze it.
Steps:
- create an option-specific logger for each of the implied output options
- modify the messages specific to each option to go to the correct logger
- if an implied output option is passed, change the option-specific
logger (only) to log at INFO level
- test that root logger messages don't come through option-specific loggers
They shouldn't, per https://docs.python.org/3/howto/logging.html#logging-flow
but test just the same. Particularly test a message that can come from
remote repositories.
Fixes #526, #573, #665, #824
2016-05-18 02:59:58 +00:00
|
|
|
subparser.add_argument('-p', '--progress', dest='progress',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help="""show progress display while checking""")
|
2014-02-04 22:49:10 +00:00
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
change_passphrase_epilog = textwrap.dedent("""
|
|
|
|
The key files used for repository encryption are optionally passphrase
|
|
|
|
protected. This command can be used to change this passphrase.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('change-passphrase', parents=[common_parser], add_help=False,
|
2014-04-06 13:16:25 +00:00
|
|
|
description=self.do_change_passphrase.__doc__,
|
|
|
|
epilog=change_passphrase_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='change repository passphrase')
|
2012-12-04 22:02:10 +00:00
|
|
|
subparser.set_defaults(func=self.do_change_passphrase)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2013-07-31 18:51:01 +00:00
|
|
|
type=location_validator(archive=False))
|
2011-10-27 20:17:47 +00:00
|
|
|
|
2016-01-15 05:34:09 +00:00
|
|
|
migrate_to_repokey_epilog = textwrap.dedent("""
|
|
|
|
This command migrates a repository from passphrase mode (not supported any
|
|
|
|
more) to repokey mode.
|
|
|
|
|
|
|
|
You will be first asked for the repository passphrase (to open it in passphrase
|
|
|
|
mode). This is the same passphrase as you used to use for this repo before 1.0.
|
|
|
|
|
|
|
|
It will then derive the different secrets from this passphrase.
|
|
|
|
|
|
|
|
Then you will be asked for a new passphrase (twice, for safety). This
|
|
|
|
passphrase will be used to protect the repokey (which contains these same
|
|
|
|
secrets in encrypted form). You may use the same passphrase as you used to
|
|
|
|
use, but you may also use a different one.
|
|
|
|
|
|
|
|
After migrating to repokey mode, you can change the passphrase at any time.
|
|
|
|
But please note: the secrets will always stay the same and they could always
|
|
|
|
be derived from your (old) passphrase-mode passphrase.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('migrate-to-repokey', parents=[common_parser], add_help=False,
|
2016-01-15 05:34:09 +00:00
|
|
|
description=self.do_migrate_to_repokey.__doc__,
|
|
|
|
epilog=migrate_to_repokey_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='migrate passphrase-mode repository to repokey')
|
2016-01-15 05:34:09 +00:00
|
|
|
subparser.set_defaults(func=self.do_migrate_to_repokey)
|
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
|
|
|
type=location_validator(archive=False))
|
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
create_epilog = textwrap.dedent("""
|
|
|
|
This command creates a backup archive containing all files found while recursively
|
|
|
|
traversing all paths specified. The archive will consume almost no disk space for
|
|
|
|
files or parts of files that have already been stored in other archives.
|
|
|
|
|
2016-05-01 23:12:15 +00:00
|
|
|
The archive name needs to be unique. It must not end in '.checkpoint' or
|
|
|
|
'.checkpoint.N' (with N being a number), because these names are used for
|
|
|
|
checkpoints and treated in special ways.
|
2016-03-15 18:08:36 +00:00
|
|
|
|
2016-05-05 11:38:08 +00:00
|
|
|
In the archive name, you may use the following format tags:
|
2016-08-05 23:24:22 +00:00
|
|
|
{now}, {utcnow}, {fqdn}, {hostname}, {user}, {pid}, {uuid4}, {borgversion}
|
2016-05-05 11:38:08 +00:00
|
|
|
|
2016-03-15 18:08:36 +00:00
|
|
|
To speed up pulling backups over sshfs and similar network file systems which do
|
|
|
|
not provide correct inode information the --ignore-inode flag can be used. This
|
|
|
|
potentially decreases reliability of change detection, while avoiding always reading
|
|
|
|
all files on these file systems.
|
|
|
|
|
2015-06-28 12:02:38 +00:00
|
|
|
See the output of the "borg help patterns" command for more help on exclude patterns.
|
2016-06-21 22:31:31 +00:00
|
|
|
See the output of the "borg help placeholders" command for more help on placeholders.
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2014-02-08 14:44:31 +00:00
|
|
|
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('create', parents=[common_parser], add_help=False,
|
2014-02-08 14:44:31 +00:00
|
|
|
description=self.do_create.__doc__,
|
2014-04-06 13:16:25 +00:00
|
|
|
epilog=create_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='create backup')
|
2010-10-15 18:35:49 +00:00
|
|
|
subparser.set_defaults(func=self.do_create)
|
2016-04-09 23:28:18 +00:00
|
|
|
|
|
|
|
subparser.add_argument('-n', '--dry-run', dest='dry_run',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='do not create a backup archive')
|
|
|
|
|
2011-08-07 15:10:21 +00:00
|
|
|
subparser.add_argument('-s', '--stats', dest='stats',
|
|
|
|
action='store_true', default=False,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='print statistics for the created archive')
|
2015-12-10 09:28:43 +00:00
|
|
|
subparser.add_argument('-p', '--progress', dest='progress',
|
|
|
|
action='store_true', default=False,
|
2016-04-09 23:28:18 +00:00
|
|
|
help='show progress display while creating the archive, showing Original, '
|
|
|
|
'Compressed and Deduplicated sizes, followed by the Number of files seen '
|
|
|
|
'and the path being processed, default: %(default)s')
|
2016-01-14 17:57:05 +00:00
|
|
|
subparser.add_argument('--list', dest='output_list',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='output verbose list of items (files, dirs, ...)')
|
2015-12-02 01:55:59 +00:00
|
|
|
subparser.add_argument('--filter', dest='output_filter', metavar='STATUSCHARS',
|
|
|
|
help='only display items with the given status characters')
|
2016-04-09 23:28:18 +00:00
|
|
|
|
|
|
|
exclude_group = subparser.add_argument_group('Exclusion options')
|
|
|
|
exclude_group.add_argument('-e', '--exclude', dest='excludes',
|
|
|
|
type=parse_pattern, action='append',
|
|
|
|
metavar="PATTERN", help='exclude paths matching PATTERN')
|
|
|
|
exclude_group.add_argument('--exclude-from', dest='exclude_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line')
|
|
|
|
exclude_group.add_argument('--exclude-caches', dest='exclude_caches',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='exclude directories that contain a CACHEDIR.TAG file ('
|
|
|
|
'http://www.brynosaurus.com/cachedir/spec.html)')
|
|
|
|
exclude_group.add_argument('--exclude-if-present', dest='exclude_if_present',
|
|
|
|
metavar='FILENAME', action='append', type=str,
|
|
|
|
help='exclude directories that contain the specified file')
|
|
|
|
exclude_group.add_argument('--keep-tag-files', dest='keep_tag_files',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='keep tag files of excluded caches/directories')
|
|
|
|
|
|
|
|
fs_group = subparser.add_argument_group('Filesystem options')
|
|
|
|
fs_group.add_argument('-x', '--one-file-system', dest='one_file_system',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='stay in same file system, do not cross mount points')
|
|
|
|
fs_group.add_argument('--numeric-owner', dest='numeric_owner',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='only store numeric user and group identifiers')
|
|
|
|
fs_group.add_argument('--ignore-inode', dest='ignore_inode',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='ignore inode data in the file metadata cache used to detect unchanged files.')
|
|
|
|
fs_group.add_argument('--read-special', dest='read_special',
|
|
|
|
action='store_true', default=False,
|
2016-07-04 17:07:37 +00:00
|
|
|
help='open and read block and char device files as well as FIFOs as if they were '
|
|
|
|
'regular files. Also follows symlinks pointing to these kinds of files.')
|
2016-04-09 23:28:18 +00:00
|
|
|
|
|
|
|
archive_group = subparser.add_argument_group('Archive options')
|
|
|
|
archive_group.add_argument('--comment', dest='comment', metavar='COMMENT', default='',
|
|
|
|
help='add a comment text to the archive')
|
|
|
|
archive_group.add_argument('--timestamp', dest='timestamp',
|
|
|
|
type=timestamp, default=None,
|
|
|
|
metavar='yyyy-mm-ddThh:mm:ss',
|
|
|
|
help='manually specify the archive creation date/time (UTC). '
|
|
|
|
'alternatively, give a reference file/directory.')
|
|
|
|
archive_group.add_argument('-c', '--checkpoint-interval', dest='checkpoint_interval',
|
2016-06-27 18:05:20 +00:00
|
|
|
type=int, default=1800, metavar='SECONDS',
|
|
|
|
help='write checkpoint every SECONDS seconds (Default: 1800)')
|
2016-04-09 23:28:18 +00:00
|
|
|
archive_group.add_argument('--chunker-params', dest='chunker_params',
|
|
|
|
type=ChunkerParams, default=CHUNKER_PARAMS,
|
|
|
|
metavar='CHUNK_MIN_EXP,CHUNK_MAX_EXP,HASH_MASK_BITS,HASH_WINDOW_SIZE',
|
|
|
|
help='specify the chunker parameters. default: %d,%d,%d,%d' % CHUNKER_PARAMS)
|
|
|
|
archive_group.add_argument('-C', '--compression', dest='compression',
|
|
|
|
type=CompressionSpec, default=dict(name='none'), metavar='COMPRESSION',
|
|
|
|
help='select compression algorithm (and level):\n'
|
|
|
|
'none == no compression (default),\n'
|
2016-07-30 19:21:45 +00:00
|
|
|
'auto,C[,L] == built-in heuristic (try with lz4 whether the data is\n'
|
|
|
|
' compressible) decides between none or C[,L] - with C[,L]\n'
|
2016-05-02 19:50:59 +00:00
|
|
|
' being any valid compression algorithm (and optional level),\n'
|
2016-04-09 23:28:18 +00:00
|
|
|
'lz4 == lz4,\n'
|
|
|
|
'zlib == zlib (default level 6),\n'
|
|
|
|
'zlib,0 .. zlib,9 == zlib (with level 0..9),\n'
|
|
|
|
'lzma == lzma (default level 6),\n'
|
|
|
|
'lzma,0 .. lzma,9 == lzma (with level 0..9).')
|
2016-04-18 23:13:10 +00:00
|
|
|
archive_group.add_argument('--compression-from', dest='compression_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='COMPRESSIONCONFIG', help='read compression patterns from COMPRESSIONCONFIG, one per line')
|
2016-04-09 23:28:18 +00:00
|
|
|
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='ARCHIVE',
|
2010-10-15 18:35:49 +00:00
|
|
|
type=location_validator(archive=True),
|
2015-09-19 14:09:20 +00:00
|
|
|
help='name of archive to create (must be also a valid directory name)')
|
2013-06-30 20:32:27 +00:00
|
|
|
subparser.add_argument('paths', metavar='PATH', nargs='+', type=str,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='paths to archive')
|
2010-10-15 18:35:49 +00:00
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
extract_epilog = textwrap.dedent("""
|
|
|
|
This command extracts the contents of an archive. By default the entire
|
|
|
|
archive is extracted but a subset of files and directories can be selected
|
|
|
|
by passing a list of ``PATHs`` as arguments. The file selection can further
|
|
|
|
be restricted by using the ``--exclude`` option.
|
2014-02-08 14:44:31 +00:00
|
|
|
|
2015-06-28 12:02:38 +00:00
|
|
|
See the output of the "borg help patterns" command for more help on exclude patterns.
|
2016-05-07 16:53:58 +00:00
|
|
|
|
|
|
|
By using ``--dry-run``, you can do all extraction steps except actually writing the
|
|
|
|
output data: reading metadata and data chunks from the repo, checking the hash/hmac,
|
|
|
|
decrypting, decompressing.
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('extract', parents=[common_parser], add_help=False,
|
2014-02-08 14:44:31 +00:00
|
|
|
description=self.do_extract.__doc__,
|
2014-04-06 13:16:25 +00:00
|
|
|
epilog=extract_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='extract archive contents')
|
2010-10-15 18:35:49 +00:00
|
|
|
subparser.set_defaults(func=self.do_extract)
|
2016-01-28 19:25:55 +00:00
|
|
|
subparser.add_argument('--list', dest='output_list',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='output verbose list of items (files, dirs, ...)')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('-n', '--dry-run', dest='dry_run',
|
2014-02-18 20:33:06 +00:00
|
|
|
default=False, action='store_true',
|
|
|
|
help='do not actually change any files')
|
2013-06-30 20:32:27 +00:00
|
|
|
subparser.add_argument('-e', '--exclude', dest='excludes',
|
2015-12-15 23:14:02 +00:00
|
|
|
type=parse_pattern, action='append',
|
2013-07-05 10:32:56 +00:00
|
|
|
metavar="PATTERN", help='exclude paths matching PATTERN')
|
2014-02-08 17:44:48 +00:00
|
|
|
subparser.add_argument('--exclude-from', dest='exclude_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line')
|
2012-02-29 22:59:17 +00:00
|
|
|
subparser.add_argument('--numeric-owner', dest='numeric_owner',
|
|
|
|
action='store_true', default=False,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='only obey numeric user and group identifiers')
|
2014-08-02 20:15:21 +00:00
|
|
|
subparser.add_argument('--strip-components', dest='strip_components',
|
|
|
|
type=int, default=0, metavar='NUMBER',
|
|
|
|
help='Remove the specified number of leading path elements. Pathnames with fewer elements will be silently skipped.')
|
2015-03-01 04:07:29 +00:00
|
|
|
subparser.add_argument('--stdout', dest='stdout',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='write all extracted data to stdout')
|
2015-04-17 20:28:40 +00:00
|
|
|
subparser.add_argument('--sparse', dest='sparse',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='create holes in output sparse file from all-zero chunks')
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='ARCHIVE',
|
2010-10-15 18:35:49 +00:00
|
|
|
type=location_validator(archive=True),
|
2013-07-05 10:32:56 +00:00
|
|
|
help='archive to extract')
|
2013-06-30 20:32:27 +00:00
|
|
|
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
|
2016-01-18 15:45:42 +00:00
|
|
|
help='paths to extract; patterns are supported')
|
2010-10-15 18:35:49 +00:00
|
|
|
|
2016-03-12 11:40:39 +00:00
|
|
|
diff_epilog = textwrap.dedent("""
|
|
|
|
This command finds differences in files (contents, user, group, mode) between archives.
|
|
|
|
|
|
|
|
Both archives need to be in the same repository, and a repository location may only
|
|
|
|
be specified for ARCHIVE1.
|
|
|
|
|
2016-03-18 14:42:32 +00:00
|
|
|
For archives created with Borg 1.1 or newer diff automatically detects whether
|
|
|
|
the archives are created with the same chunker params. If so, only chunk IDs
|
2016-03-18 12:30:39 +00:00
|
|
|
are compared, which is very fast.
|
|
|
|
|
|
|
|
For archives prior to Borg 1.1 chunk contents are compared by default.
|
|
|
|
If you did not create the archives with different chunker params,
|
|
|
|
pass --same-chunker-params.
|
|
|
|
Note that the chunker params changed from Borg 0.xx to 1.0.
|
2016-03-18 11:17:57 +00:00
|
|
|
|
2016-03-12 11:40:39 +00:00
|
|
|
See the output of the "borg help patterns" command for more help on exclude patterns.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('diff', parents=[common_parser], add_help=False,
|
2016-03-12 11:40:39 +00:00
|
|
|
description=self.do_diff.__doc__,
|
|
|
|
epilog=diff_epilog,
|
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='find differences in archive contents')
|
|
|
|
subparser.set_defaults(func=self.do_diff)
|
|
|
|
subparser.add_argument('-e', '--exclude', dest='excludes',
|
|
|
|
type=parse_pattern, action='append',
|
|
|
|
metavar="PATTERN", help='exclude paths matching PATTERN')
|
|
|
|
subparser.add_argument('--exclude-from', dest='exclude_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line')
|
|
|
|
subparser.add_argument('--numeric-owner', dest='numeric_owner',
|
|
|
|
action='store_true', default=False,
|
2016-03-15 17:19:44 +00:00
|
|
|
help='only consider numeric user and group identifiers')
|
2016-03-12 11:40:39 +00:00
|
|
|
subparser.add_argument('--same-chunker-params', dest='same_chunker_params',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='Override check of chunker parameters.')
|
2016-03-31 07:33:03 +00:00
|
|
|
subparser.add_argument('--sort', dest='sort',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='Sort the output lines by file path.')
|
2016-03-12 11:40:39 +00:00
|
|
|
subparser.add_argument('location', metavar='ARCHIVE1',
|
|
|
|
type=location_validator(archive=True),
|
|
|
|
help='archive')
|
|
|
|
subparser.add_argument('archive2', metavar='ARCHIVE2',
|
2016-03-15 17:19:34 +00:00
|
|
|
type=archivename_validator(),
|
2016-03-12 11:40:39 +00:00
|
|
|
help='archive to compare with ARCHIVE1 (no repository location)')
|
|
|
|
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
|
|
|
|
help='paths to compare; patterns are supported')
|
|
|
|
|
2015-03-24 06:11:00 +00:00
|
|
|
rename_epilog = textwrap.dedent("""
|
|
|
|
This command renames an archive in the repository.
|
2016-04-09 23:28:18 +00:00
|
|
|
|
|
|
|
This results in a different archive ID.
|
2015-03-24 06:11:00 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('rename', parents=[common_parser], add_help=False,
|
2015-03-24 06:11:00 +00:00
|
|
|
description=self.do_rename.__doc__,
|
|
|
|
epilog=rename_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='rename archive')
|
2015-03-24 06:11:00 +00:00
|
|
|
subparser.set_defaults(func=self.do_rename)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='ARCHIVE',
|
2015-03-24 06:11:00 +00:00
|
|
|
type=location_validator(archive=True),
|
|
|
|
help='archive to rename')
|
2016-02-19 22:01:20 +00:00
|
|
|
subparser.add_argument('name', metavar='NEWNAME',
|
|
|
|
type=archivename_validator(),
|
2015-03-24 06:11:00 +00:00
|
|
|
help='the new archive name to use')
|
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
delete_epilog = textwrap.dedent("""
|
2015-03-09 15:02:06 +00:00
|
|
|
This command deletes an archive from the repository or the complete repository.
|
|
|
|
Disk space is reclaimed accordingly. If you delete the complete repository, the
|
|
|
|
local cache for it (if any) is also deleted.
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('delete', parents=[common_parser], add_help=False,
|
2014-04-06 13:16:25 +00:00
|
|
|
description=self.do_delete.__doc__,
|
|
|
|
epilog=delete_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='delete archive')
|
2010-10-15 18:35:49 +00:00
|
|
|
subparser.set_defaults(func=self.do_delete)
|
2016-01-16 19:46:49 +00:00
|
|
|
subparser.add_argument('-p', '--progress', dest='progress',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help="""show progress display while deleting a single archive""")
|
2014-03-19 21:32:07 +00:00
|
|
|
subparser.add_argument('-s', '--stats', dest='stats',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='print statistics for the deleted archive')
|
2015-10-03 17:29:45 +00:00
|
|
|
subparser.add_argument('-c', '--cache-only', dest='cache_only',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='delete only the local cache for the given repository')
|
2016-07-01 02:27:06 +00:00
|
|
|
subparser.add_argument('--force', dest='forced',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='force deletion of corrupted archives')
|
2015-11-18 01:27:25 +00:00
|
|
|
subparser.add_argument('--save-space', dest='save_space', action='store_true',
|
|
|
|
default=False,
|
|
|
|
help='work slower, but using less space')
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='TARGET', nargs='?', default='',
|
2015-03-09 15:02:06 +00:00
|
|
|
type=location_validator(),
|
|
|
|
help='archive or repository to delete')
|
2010-10-15 18:35:49 +00:00
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
list_epilog = textwrap.dedent("""
|
|
|
|
This command lists the contents of a repository or an archive.
|
2016-03-17 16:32:23 +00:00
|
|
|
|
|
|
|
See the "borg help patterns" command for more help on exclude patterns.
|
|
|
|
|
2016-06-21 13:18:19 +00:00
|
|
|
The following keys are available for --format:
|
|
|
|
""") + BaseFormatter.keys_help() + textwrap.dedent("""
|
2016-03-17 16:32:23 +00:00
|
|
|
|
2016-06-21 13:18:19 +00:00
|
|
|
-- Keys for listing repository archives:
|
|
|
|
""") + ArchiveFormatter.keys_help() + textwrap.dedent("""
|
2016-03-17 16:32:23 +00:00
|
|
|
|
2016-06-21 13:18:19 +00:00
|
|
|
-- Keys for listing archive files:
|
2016-03-17 16:32:23 +00:00
|
|
|
""") + ItemFormatter.keys_help()
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('list', parents=[common_parser], add_help=False,
|
2014-04-06 13:16:25 +00:00
|
|
|
description=self.do_list.__doc__,
|
|
|
|
epilog=list_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='list archive or repository contents')
|
2010-10-15 18:35:49 +00:00
|
|
|
subparser.set_defaults(func=self.do_list)
|
2015-08-15 18:52:14 +00:00
|
|
|
subparser.add_argument('--short', dest='short',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='only print file/directory names, nothing else')
|
2016-03-17 16:32:23 +00:00
|
|
|
subparser.add_argument('--format', '--list-format', dest='format', type=str,
|
|
|
|
help="""specify format for file listing
|
|
|
|
(default: "{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NL}")""")
|
2016-06-21 21:36:30 +00:00
|
|
|
subparser.add_argument('-P', '--prefix', dest='prefix', type=PrefixSpec,
|
2015-11-06 14:45:49 +00:00
|
|
|
help='only consider archive names starting with this prefix')
|
2016-03-17 16:32:23 +00:00
|
|
|
subparser.add_argument('-e', '--exclude', dest='excludes',
|
|
|
|
type=parse_pattern, action='append',
|
|
|
|
metavar="PATTERN", help='exclude paths matching PATTERN')
|
|
|
|
subparser.add_argument('--exclude-from', dest='exclude_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line')
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
|
2015-09-06 16:18:24 +00:00
|
|
|
type=location_validator(),
|
2013-07-24 11:23:51 +00:00
|
|
|
help='repository/archive to list contents of')
|
2016-03-17 16:32:23 +00:00
|
|
|
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
|
2016-03-18 11:17:57 +00:00
|
|
|
help='paths to list; patterns are supported')
|
2015-11-06 14:45:49 +00:00
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
mount_epilog = textwrap.dedent("""
|
|
|
|
This command mounts an archive as a FUSE filesystem. This can be useful for
|
|
|
|
browsing an archive or restoring individual files. Unless the ``--foreground``
|
|
|
|
option is given the command will run in the background until the filesystem
|
|
|
|
is ``umounted``.
|
2016-03-17 00:40:17 +00:00
|
|
|
|
|
|
|
The command ``borgfs`` provides a wrapper for ``borg mount``. This can also be
|
|
|
|
used in fstab entries:
|
|
|
|
``/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0``
|
|
|
|
|
|
|
|
To allow a regular user to use fstab entries, add the ``user`` option:
|
|
|
|
``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0``
|
2016-04-23 16:03:05 +00:00
|
|
|
|
2016-07-10 00:19:27 +00:00
|
|
|
For mount options, see the fuse(8) manual page. Additional mount options
|
|
|
|
supported by borg:
|
|
|
|
|
|
|
|
- allow_damaged_files: by default damaged files (where missing chunks were
|
|
|
|
replaced with runs of zeros by borg check --repair) are not readable and
|
|
|
|
return EIO (I/O error). Set this option to read such files.
|
2016-07-10 23:23:27 +00:00
|
|
|
|
2016-04-23 16:03:05 +00:00
|
|
|
The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users
|
|
|
|
to tweak the performance. It sets the number of cached data chunks; additional
|
|
|
|
memory usage can be up to ~8 MiB times this number. The default is the number
|
|
|
|
of CPU cores.
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('mount', parents=[common_parser], add_help=False,
|
2014-04-06 13:16:25 +00:00
|
|
|
description=self.do_mount.__doc__,
|
|
|
|
epilog=mount_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='mount repository')
|
2013-07-21 22:41:06 +00:00
|
|
|
subparser.set_defaults(func=self.do_mount)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', type=location_validator(),
|
2014-03-26 20:47:01 +00:00
|
|
|
help='repository/archive to mount')
|
2013-07-24 11:23:51 +00:00
|
|
|
subparser.add_argument('mountpoint', metavar='MOUNTPOINT', type=str,
|
2013-07-24 11:05:47 +00:00
|
|
|
help='where to mount filesystem')
|
2013-07-24 11:23:51 +00:00
|
|
|
subparser.add_argument('-f', '--foreground', dest='foreground',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='stay in foreground, do not daemonize')
|
2013-07-27 12:44:12 +00:00
|
|
|
subparser.add_argument('-o', dest='options', type=str,
|
|
|
|
help='Extra mount options')
|
2013-07-21 22:41:06 +00:00
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
info_epilog = textwrap.dedent("""
|
2016-07-30 21:16:19 +00:00
|
|
|
This command displays detailed information about the specified archive or repository.
|
2016-04-16 20:35:50 +00:00
|
|
|
|
2016-07-30 21:16:19 +00:00
|
|
|
The "This archive" line refers exclusively to the given archive:
|
|
|
|
"Deduplicated size" is the size of the unique chunks stored only for the
|
|
|
|
given archive.
|
|
|
|
|
|
|
|
The "All archives" line shows global statistics (all chunks).
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('info', parents=[common_parser], add_help=False,
|
2014-04-06 13:16:25 +00:00
|
|
|
description=self.do_info.__doc__,
|
|
|
|
epilog=info_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
2016-07-30 21:16:19 +00:00
|
|
|
help='show repository or archive information')
|
2010-10-15 18:35:49 +00:00
|
|
|
subparser.set_defaults(func=self.do_info)
|
2016-07-30 21:16:19 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE',
|
|
|
|
type=location_validator(),
|
|
|
|
help='archive or repository to display information about')
|
2010-10-15 18:35:49 +00:00
|
|
|
|
2015-11-21 19:50:53 +00:00
|
|
|
break_lock_epilog = textwrap.dedent("""
|
|
|
|
This command breaks the repository and cache locks.
|
|
|
|
Please use carefully and only while no borg process (on any machine) is
|
|
|
|
trying to access the Cache or the Repository.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('break-lock', parents=[common_parser], add_help=False,
|
2015-11-21 19:50:53 +00:00
|
|
|
description=self.do_break_lock.__doc__,
|
|
|
|
epilog=break_lock_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='break repository and cache locks')
|
2015-11-21 19:50:53 +00:00
|
|
|
subparser.set_defaults(func=self.do_break_lock)
|
2016-07-13 18:04:20 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2015-11-21 19:50:53 +00:00
|
|
|
type=location_validator(archive=False),
|
|
|
|
help='repository for which to break the locks')
|
|
|
|
|
2014-04-06 13:16:25 +00:00
|
|
|
prune_epilog = textwrap.dedent("""
|
2016-06-06 14:11:22 +00:00
|
|
|
The prune command prunes a repository by deleting all archives not matching
|
2014-04-06 13:16:25 +00:00
|
|
|
any of the specified retention options. This command is normally used by
|
|
|
|
automated backup scripts wanting to keep a certain number of historic backups.
|
|
|
|
|
2016-05-03 21:06:26 +00:00
|
|
|
Also, prune automatically removes checkpoint archives (incomplete archives left
|
|
|
|
behind by interrupted backup runs) except if the checkpoint is the latest
|
|
|
|
archive (and thus still needed). Checkpoint archives are not considered when
|
|
|
|
comparing archive counts against the retention limits (--keep-*).
|
|
|
|
|
2016-04-18 20:38:20 +00:00
|
|
|
If a prefix is set with -P, then only archives that start with the prefix are
|
|
|
|
considered for deletion and only those archives count towards the totals
|
|
|
|
specified by the rules.
|
|
|
|
Otherwise, *all* archives in the repository are candidates for deletion!
|
2014-04-06 13:16:25 +00:00
|
|
|
|
2016-05-03 21:06:26 +00:00
|
|
|
If you have multiple sequences of archives with different data sets (e.g.
|
|
|
|
from different machines) in one shared repository, use one prune call per
|
|
|
|
data set that matches only the respective archives using the -P option.
|
|
|
|
|
2014-02-25 00:32:18 +00:00
|
|
|
The "--keep-within" option takes an argument of the form "<int><char>",
|
|
|
|
where char is "H", "d", "w", "m", "y". For example, "--keep-within 2d" means
|
2014-02-08 20:37:27 +00:00
|
|
|
to keep all archives that were created within the past 48 hours.
|
|
|
|
"1m" is taken to mean "31d". The archives kept with this option do not
|
2014-04-06 13:16:25 +00:00
|
|
|
count towards the totals specified by any other options.
|
2014-02-04 01:11:47 +00:00
|
|
|
|
2016-04-18 20:38:20 +00:00
|
|
|
A good procedure is to thin out more and more the older your backups get.
|
|
|
|
As an example, "--keep-daily 7" means to keep the latest backup on each day,
|
|
|
|
up to 7 most recent days with backups (days without backups do not count).
|
|
|
|
The rules are applied from secondly to yearly, and backups selected by previous
|
|
|
|
rules do not count towards those of later rules. The time that each backup
|
2016-06-09 18:29:44 +00:00
|
|
|
starts is used for pruning purposes. Dates and times are interpreted in
|
2016-04-18 20:38:20 +00:00
|
|
|
the local timezone, and weeks go from Monday to Sunday. Specifying a
|
|
|
|
negative number of archives to keep means that there is no limit.
|
|
|
|
|
2016-04-18 19:56:05 +00:00
|
|
|
The "--keep-last N" option is doing the same as "--keep-secondly N" (and it will
|
|
|
|
keep the last N archives under the assumption that you do not create more than one
|
|
|
|
backup archive in the same second).
|
2014-04-06 13:16:25 +00:00
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('prune', parents=[common_parser], add_help=False,
|
2014-02-04 01:11:47 +00:00
|
|
|
description=self.do_prune.__doc__,
|
2014-04-06 13:16:25 +00:00
|
|
|
epilog=prune_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='prune archives')
|
2011-11-22 20:47:17 +00:00
|
|
|
subparser.set_defaults(func=self.do_prune)
|
2014-02-20 02:33:05 +00:00
|
|
|
subparser.add_argument('-n', '--dry-run', dest='dry_run',
|
|
|
|
default=False, action='store_true',
|
|
|
|
help='do not change repository')
|
2016-07-01 02:27:06 +00:00
|
|
|
subparser.add_argument('--force', dest='forced',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='force pruning of corrupted archives')
|
2014-03-19 21:32:07 +00:00
|
|
|
subparser.add_argument('-s', '--stats', dest='stats',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='print statistics for the deleted archive')
|
2016-02-18 22:44:23 +00:00
|
|
|
subparser.add_argument('--list', dest='output_list',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='output verbose list of archives it keeps/prunes')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('--keep-within', dest='within', type=str, metavar='WITHIN',
|
2014-02-08 20:37:27 +00:00
|
|
|
help='keep all archives within this time interval')
|
2016-04-18 19:56:05 +00:00
|
|
|
subparser.add_argument('--keep-last', '--keep-secondly', dest='secondly', type=int, default=0,
|
|
|
|
help='number of secondly archives to keep')
|
|
|
|
subparser.add_argument('--keep-minutely', dest='minutely', type=int, default=0,
|
|
|
|
help='number of minutely archives to keep')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('-H', '--keep-hourly', dest='hourly', type=int, default=0,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='number of hourly archives to keep')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('-d', '--keep-daily', dest='daily', type=int, default=0,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='number of daily archives to keep')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('-w', '--keep-weekly', dest='weekly', type=int, default=0,
|
2014-02-09 21:15:49 +00:00
|
|
|
help='number of weekly archives to keep')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('-m', '--keep-monthly', dest='monthly', type=int, default=0,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='number of monthly archives to keep')
|
2014-02-19 21:46:15 +00:00
|
|
|
subparser.add_argument('-y', '--keep-yearly', dest='yearly', type=int, default=0,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='number of yearly archives to keep')
|
2016-06-21 21:36:30 +00:00
|
|
|
subparser.add_argument('-P', '--prefix', dest='prefix', type=PrefixSpec,
|
2013-07-05 10:32:56 +00:00
|
|
|
help='only consider archive names starting with this prefix')
|
2015-11-18 01:27:25 +00:00
|
|
|
subparser.add_argument('--save-space', dest='save_space', action='store_true',
|
|
|
|
default=False,
|
|
|
|
help='work slower, but using less space')
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2011-08-06 21:33:06 +00:00
|
|
|
type=location_validator(archive=False),
|
2013-07-05 10:32:56 +00:00
|
|
|
help='repository to prune')
|
2014-02-08 14:44:31 +00:00
|
|
|
|
2015-10-03 16:36:52 +00:00
|
|
|
upgrade_epilog = textwrap.dedent("""
|
2016-02-07 18:23:06 +00:00
|
|
|
Upgrade an existing Borg repository.
|
|
|
|
This currently supports converting an Attic repository to Borg and also
|
|
|
|
helps with converting Borg 0.xx to 1.0.
|
2015-10-01 03:50:46 +00:00
|
|
|
|
2016-02-07 20:08:20 +00:00
|
|
|
Currently, only LOCAL repositories can be upgraded (issue #465).
|
|
|
|
|
2016-01-14 16:00:45 +00:00
|
|
|
It will change the magic strings in the repository's segments
|
|
|
|
to match the new Borg magic strings. The keyfiles found in
|
2015-10-01 03:50:46 +00:00
|
|
|
$ATTIC_KEYS_DIR or ~/.attic/keys/ will also be converted and
|
2016-01-28 22:15:49 +00:00
|
|
|
copied to $BORG_KEYS_DIR or ~/.config/borg/keys.
|
2015-10-01 03:50:46 +00:00
|
|
|
|
2016-01-14 16:00:45 +00:00
|
|
|
The cache files are converted, from $ATTIC_CACHE_DIR or
|
2015-10-02 14:12:13 +00:00
|
|
|
~/.cache/attic to $BORG_CACHE_DIR or ~/.cache/borg, but the
|
|
|
|
cache layout between Borg and Attic changed, so it is possible
|
|
|
|
the first backup after the conversion takes longer than expected
|
|
|
|
due to the cache resync.
|
2015-10-01 03:50:46 +00:00
|
|
|
|
2016-01-14 16:00:45 +00:00
|
|
|
Upgrade should be able to resume if interrupted, although it
|
|
|
|
will still iterate over all segments. If you want to start
|
2015-10-03 16:46:23 +00:00
|
|
|
from scratch, use `borg delete` over the copied repository to
|
|
|
|
make sure the cache files are also removed:
|
|
|
|
|
|
|
|
borg delete borg
|
|
|
|
|
2016-01-14 16:00:45 +00:00
|
|
|
Unless ``--inplace`` is specified, the upgrade process first
|
do not upgrade repositories in place by default
instead, we perform the equivalent of `cp -al` on the repository to
keep a backup, and then rewrite the files, breaking the hardlinks as
necessary.
it has to be confirmed that the rest of Borg will also break hardlinks
when operating on files in the repository. if Borg operates in place
on any files of the repository, it could jeopardize the backup, so
this needs to be verified. I believe that most files are written to a
temporary file and moved into place, however, so the backup should be
safe.
the rationale behind the backup copy is that we want to be extra
careful with user's data by default. the old behavior is retained
through the `--inplace`/`-i` commandline flag. plus, this way we don't
need to tell users to go through extra steps (`cp -a`, in particular)
before running the command.
also, it can take a long time to do the copy of the attic repository
we wish to work on. since `cp -a` doesn't provide progress
information, the new default behavior provides a nicer user experience
of giving an overall impression of the upgrade progress, while
retaining compatibility with Attic by default (in a separate
repository, of course).
this makes the upgrade command much less scary to use and hopefully
will convert drones to the borg collective.
the only place where the default inplace behavior is retained is in
the header_replace() function, to avoid breaking the cache conversion
code and to keep API stability and semantic coherence ("replace" by
defaults means in place).
2015-10-15 22:02:24 +00:00
|
|
|
creates a backup copy of the repository, in
|
2016-01-14 16:00:45 +00:00
|
|
|
REPOSITORY.upgrade-DATETIME, using hardlinks. This takes
|
do not upgrade repositories in place by default
instead, we perform the equivalent of `cp -al` on the repository to
keep a backup, and then rewrite the files, breaking the hardlinks as
necessary.
it has to be confirmed that the rest of Borg will also break hardlinks
when operating on files in the repository. if Borg operates in place
on any files of the repository, it could jeopardize the backup, so
this needs to be verified. I believe that most files are written to a
temporary file and moved into place, however, so the backup should be
safe.
the rationale behind the backup copy is that we want to be extra
careful with user's data by default. the old behavior is retained
through the `--inplace`/`-i` commandline flag. plus, this way we don't
need to tell users to go through extra steps (`cp -a`, in particular)
before running the command.
also, it can take a long time to do the copy of the attic repository
we wish to work on. since `cp -a` doesn't provide progress
information, the new default behavior provides a nicer user experience
of giving an overall impression of the upgrade progress, while
retaining compatibility with Attic by default (in a separate
repository, of course).
this makes the upgrade command much less scary to use and hopefully
will convert drones to the borg collective.
the only place where the default inplace behavior is retained is in
the header_replace() function, to avoid breaking the cache conversion
code and to keep API stability and semantic coherence ("replace" by
defaults means in place).
2015-10-15 22:02:24 +00:00
|
|
|
longer than in place upgrades, but is much safer and gives
|
2016-01-14 16:00:45 +00:00
|
|
|
progress information (as opposed to ``cp -al``). Once you are
|
do not upgrade repositories in place by default
instead, we perform the equivalent of `cp -al` on the repository to
keep a backup, and then rewrite the files, breaking the hardlinks as
necessary.
it has to be confirmed that the rest of Borg will also break hardlinks
when operating on files in the repository. if Borg operates in place
on any files of the repository, it could jeopardize the backup, so
this needs to be verified. I believe that most files are written to a
temporary file and moved into place, however, so the backup should be
safe.
the rationale behind the backup copy is that we want to be extra
careful with user's data by default. the old behavior is retained
through the `--inplace`/`-i` commandline flag. plus, this way we don't
need to tell users to go through extra steps (`cp -a`, in particular)
before running the command.
also, it can take a long time to do the copy of the attic repository
we wish to work on. since `cp -a` doesn't provide progress
information, the new default behavior provides a nicer user experience
of giving an overall impression of the upgrade progress, while
retaining compatibility with Attic by default (in a separate
repository, of course).
this makes the upgrade command much less scary to use and hopefully
will convert drones to the borg collective.
the only place where the default inplace behavior is retained is in
the header_replace() function, to avoid breaking the cache conversion
code and to keep API stability and semantic coherence ("replace" by
defaults means in place).
2015-10-15 22:02:24 +00:00
|
|
|
satisfied with the conversion, you can safely destroy the
|
|
|
|
backup copy.
|
|
|
|
|
2016-01-14 16:00:45 +00:00
|
|
|
WARNING: Running the upgrade in place will make the current
|
2015-10-18 02:04:00 +00:00
|
|
|
copy unusable with older version, with no way of going back
|
2016-01-14 16:00:45 +00:00
|
|
|
to previous versions. This can PERMANENTLY DAMAGE YOUR
|
do not upgrade repositories in place by default
instead, we perform the equivalent of `cp -al` on the repository to
keep a backup, and then rewrite the files, breaking the hardlinks as
necessary.
it has to be confirmed that the rest of Borg will also break hardlinks
when operating on files in the repository. if Borg operates in place
on any files of the repository, it could jeopardize the backup, so
this needs to be verified. I believe that most files are written to a
temporary file and moved into place, however, so the backup should be
safe.
the rationale behind the backup copy is that we want to be extra
careful with user's data by default. the old behavior is retained
through the `--inplace`/`-i` commandline flag. plus, this way we don't
need to tell users to go through extra steps (`cp -a`, in particular)
before running the command.
also, it can take a long time to do the copy of the attic repository
we wish to work on. since `cp -a` doesn't provide progress
information, the new default behavior provides a nicer user experience
of giving an overall impression of the upgrade progress, while
retaining compatibility with Attic by default (in a separate
repository, of course).
this makes the upgrade command much less scary to use and hopefully
will convert drones to the borg collective.
the only place where the default inplace behavior is retained is in
the header_replace() function, to avoid breaking the cache conversion
code and to keep API stability and semantic coherence ("replace" by
defaults means in place).
2015-10-15 22:02:24 +00:00
|
|
|
REPOSITORY! Attic CAN NOT READ BORG REPOSITORIES, as the
|
2016-01-14 16:00:45 +00:00
|
|
|
magic strings have changed. You have been warned.""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('upgrade', parents=[common_parser], add_help=False,
|
2015-10-03 16:36:52 +00:00
|
|
|
description=self.do_upgrade.__doc__,
|
|
|
|
epilog=upgrade_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='upgrade repository format')
|
2015-10-03 16:36:52 +00:00
|
|
|
subparser.set_defaults(func=self.do_upgrade)
|
2016-01-16 19:32:24 +00:00
|
|
|
subparser.add_argument('-p', '--progress', dest='progress',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help="""show progress display while upgrading the repository""")
|
2015-10-01 03:50:46 +00:00
|
|
|
subparser.add_argument('-n', '--dry-run', dest='dry_run',
|
|
|
|
default=False, action='store_true',
|
|
|
|
help='do not change repository')
|
do not upgrade repositories in place by default
instead, we perform the equivalent of `cp -al` on the repository to
keep a backup, and then rewrite the files, breaking the hardlinks as
necessary.
it has to be confirmed that the rest of Borg will also break hardlinks
when operating on files in the repository. if Borg operates in place
on any files of the repository, it could jeopardize the backup, so
this needs to be verified. I believe that most files are written to a
temporary file and moved into place, however, so the backup should be
safe.
the rationale behind the backup copy is that we want to be extra
careful with user's data by default. the old behavior is retained
through the `--inplace`/`-i` commandline flag. plus, this way we don't
need to tell users to go through extra steps (`cp -a`, in particular)
before running the command.
also, it can take a long time to do the copy of the attic repository
we wish to work on. since `cp -a` doesn't provide progress
information, the new default behavior provides a nicer user experience
of giving an overall impression of the upgrade progress, while
retaining compatibility with Attic by default (in a separate
repository, of course).
this makes the upgrade command much less scary to use and hopefully
will convert drones to the borg collective.
the only place where the default inplace behavior is retained is in
the header_replace() function, to avoid breaking the cache conversion
code and to keep API stability and semantic coherence ("replace" by
defaults means in place).
2015-10-15 22:02:24 +00:00
|
|
|
subparser.add_argument('-i', '--inplace', dest='inplace',
|
|
|
|
default=False, action='store_true',
|
2015-10-18 02:04:00 +00:00
|
|
|
help="""rewrite repository in place, with no chance of going back to older
|
do not upgrade repositories in place by default
instead, we perform the equivalent of `cp -al` on the repository to
keep a backup, and then rewrite the files, breaking the hardlinks as
necessary.
it has to be confirmed that the rest of Borg will also break hardlinks
when operating on files in the repository. if Borg operates in place
on any files of the repository, it could jeopardize the backup, so
this needs to be verified. I believe that most files are written to a
temporary file and moved into place, however, so the backup should be
safe.
the rationale behind the backup copy is that we want to be extra
careful with user's data by default. the old behavior is retained
through the `--inplace`/`-i` commandline flag. plus, this way we don't
need to tell users to go through extra steps (`cp -a`, in particular)
before running the command.
also, it can take a long time to do the copy of the attic repository
we wish to work on. since `cp -a` doesn't provide progress
information, the new default behavior provides a nicer user experience
of giving an overall impression of the upgrade progress, while
retaining compatibility with Attic by default (in a separate
repository, of course).
this makes the upgrade command much less scary to use and hopefully
will convert drones to the borg collective.
the only place where the default inplace behavior is retained is in
the header_replace() function, to avoid breaking the cache conversion
code and to keep API stability and semantic coherence ("replace" by
defaults means in place).
2015-10-15 22:02:24 +00:00
|
|
|
versions of the repository.""")
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2015-10-01 03:50:46 +00:00
|
|
|
type=location_validator(archive=False),
|
2015-10-03 16:36:52 +00:00
|
|
|
help='path to the repository to be upgraded')
|
2015-10-01 03:50:46 +00:00
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
recreate_epilog = textwrap.dedent("""
|
|
|
|
Recreate the contents of existing archives.
|
|
|
|
|
|
|
|
--exclude, --exclude-from and PATH have the exact same semantics
|
2016-04-10 13:59:10 +00:00
|
|
|
as in "borg create". If PATHs are specified the resulting archive
|
|
|
|
will only contain files from these PATHs.
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-08-09 19:32:03 +00:00
|
|
|
Note that all paths in an archive are relative, therefore absolute patterns/paths
|
|
|
|
will *not* match (--exclude, --exclude-from, --compression-from, PATHs).
|
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
--compression: all chunks seen will be stored using the given method.
|
|
|
|
Due to how Borg stores compressed size information this might display
|
2016-04-10 13:59:10 +00:00
|
|
|
incorrect information for archives that were not recreated at the same time.
|
2016-04-07 09:29:52 +00:00
|
|
|
There is no risk of data loss by this.
|
|
|
|
|
|
|
|
--chunker-params will re-chunk all files in the archive, this can be
|
|
|
|
used to have upgraded Borg 0.xx or Attic archives deduplicate with
|
|
|
|
Borg 1.x archives.
|
|
|
|
|
|
|
|
borg recreate is signal safe. Send either SIGINT (Ctrl-C on most terminals) or
|
|
|
|
SIGTERM to request termination.
|
|
|
|
|
|
|
|
Use the *exact same* command line to resume the operation later - changing excludes
|
|
|
|
or paths will lead to inconsistencies (changed excludes will only apply to newly
|
|
|
|
processed files/dirs). Changing compression leads to incorrect size information
|
|
|
|
(which does not cause any data loss, but can be misleading).
|
|
|
|
Changing chunker params between invocations might lead to data loss.
|
|
|
|
|
|
|
|
USE WITH CAUTION.
|
2016-04-10 13:59:10 +00:00
|
|
|
Depending on the PATHs and patterns given, recreate can be used to permanently
|
|
|
|
delete files from archives.
|
2016-04-07 09:29:52 +00:00
|
|
|
When in doubt, use "--dry-run --verbose --list" to see how patterns/PATHS are
|
|
|
|
interpreted.
|
|
|
|
|
|
|
|
The archive being recreated is only removed after the operation completes. The
|
|
|
|
archive that is built during the operation exists at the same time at
|
|
|
|
"<ARCHIVE>.recreate". The new archive will have a different archive ID.
|
|
|
|
|
2016-08-02 13:53:29 +00:00
|
|
|
With --target the original archive is not replaced, instead a new archive is created.
|
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
When rechunking space usage can be substantial, expect at least the entire
|
2016-04-10 13:59:10 +00:00
|
|
|
deduplicated size of the archives using the previous chunker params.
|
2016-04-07 09:29:52 +00:00
|
|
|
When recompressing approximately 1 % of the repository size or 512 MB
|
|
|
|
(whichever is greater) of additional space is used.
|
|
|
|
""")
|
|
|
|
subparser = subparsers.add_parser('recreate', parents=[common_parser], add_help=False,
|
|
|
|
description=self.do_recreate.__doc__,
|
|
|
|
epilog=recreate_epilog,
|
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help=self.do_recreate.__doc__)
|
|
|
|
subparser.set_defaults(func=self.do_recreate)
|
|
|
|
subparser.add_argument('--list', dest='output_list',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='output verbose list of items (files, dirs, ...)')
|
|
|
|
subparser.add_argument('--filter', dest='output_filter', metavar='STATUSCHARS',
|
|
|
|
help='only display items with the given status characters')
|
|
|
|
subparser.add_argument('-p', '--progress', dest='progress',
|
|
|
|
action='store_true', default=False,
|
2016-04-10 13:59:10 +00:00
|
|
|
help='show progress display while recreating archives')
|
2016-04-07 09:29:52 +00:00
|
|
|
subparser.add_argument('-n', '--dry-run', dest='dry_run',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='do not change anything')
|
|
|
|
subparser.add_argument('-s', '--stats', dest='stats',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='print statistics at end')
|
2016-04-10 12:09:05 +00:00
|
|
|
|
|
|
|
exclude_group = subparser.add_argument_group('Exclusion options')
|
|
|
|
exclude_group.add_argument('-e', '--exclude', dest='excludes',
|
|
|
|
type=parse_pattern, action='append',
|
|
|
|
metavar="PATTERN", help='exclude paths matching PATTERN')
|
|
|
|
exclude_group.add_argument('--exclude-from', dest='exclude_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line')
|
|
|
|
exclude_group.add_argument('--exclude-caches', dest='exclude_caches',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='exclude directories that contain a CACHEDIR.TAG file ('
|
|
|
|
'http://www.brynosaurus.com/cachedir/spec.html)')
|
|
|
|
exclude_group.add_argument('--exclude-if-present', dest='exclude_if_present',
|
|
|
|
metavar='FILENAME', action='append', type=str,
|
|
|
|
help='exclude directories that contain the specified file')
|
|
|
|
exclude_group.add_argument('--keep-tag-files', dest='keep_tag_files',
|
|
|
|
action='store_true', default=False,
|
|
|
|
help='keep tag files of excluded caches/directories')
|
|
|
|
|
|
|
|
archive_group = subparser.add_argument_group('Archive options')
|
2016-08-02 13:53:29 +00:00
|
|
|
archive_group.add_argument('--target', dest='target', metavar='TARGET', default=None,
|
|
|
|
type=archivename_validator(),
|
|
|
|
help='create a new archive with the name ARCHIVE, do not replace existing archive '
|
|
|
|
'(only applies for a single archive)')
|
2016-04-10 12:09:05 +00:00
|
|
|
archive_group.add_argument('--comment', dest='comment', metavar='COMMENT', default=None,
|
|
|
|
help='add a comment text to the archive')
|
|
|
|
archive_group.add_argument('--timestamp', dest='timestamp',
|
|
|
|
type=timestamp, default=None,
|
|
|
|
metavar='yyyy-mm-ddThh:mm:ss',
|
|
|
|
help='manually specify the archive creation date/time (UTC). '
|
|
|
|
'alternatively, give a reference file/directory.')
|
2016-04-07 09:29:52 +00:00
|
|
|
archive_group.add_argument('-C', '--compression', dest='compression',
|
|
|
|
type=CompressionSpec, default=None, metavar='COMPRESSION',
|
|
|
|
help='select compression algorithm (and level):\n'
|
|
|
|
'none == no compression (default),\n'
|
2016-05-02 19:50:59 +00:00
|
|
|
'auto,C[,L] == built-in heuristic decides between none or C[,L] - with C[,L]\n'
|
|
|
|
' being any valid compression algorithm (and optional level),\n'
|
2016-04-07 09:29:52 +00:00
|
|
|
'lz4 == lz4,\n'
|
|
|
|
'zlib == zlib (default level 6),\n'
|
|
|
|
'zlib,0 .. zlib,9 == zlib (with level 0..9),\n'
|
|
|
|
'lzma == lzma (default level 6),\n'
|
|
|
|
'lzma,0 .. lzma,9 == lzma (with level 0..9).')
|
2016-07-31 21:09:57 +00:00
|
|
|
archive_group.add_argument('--always-recompress', dest='always_recompress', action='store_true',
|
|
|
|
help='always recompress chunks, don\'t skip chunks already compressed with the same'
|
|
|
|
'algorithm.')
|
2016-04-18 23:13:10 +00:00
|
|
|
archive_group.add_argument('--compression-from', dest='compression_files',
|
|
|
|
type=argparse.FileType('r'), action='append',
|
|
|
|
metavar='COMPRESSIONCONFIG', help='read compression patterns from COMPRESSIONCONFIG, one per line')
|
2016-04-10 12:09:05 +00:00
|
|
|
archive_group.add_argument('--chunker-params', dest='chunker_params',
|
|
|
|
type=ChunkerParams, default=None,
|
|
|
|
metavar='CHUNK_MIN_EXP,CHUNK_MAX_EXP,HASH_MASK_BITS,HASH_WINDOW_SIZE',
|
|
|
|
help='specify the chunker parameters (or "default").')
|
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
|
|
|
|
type=location_validator(),
|
|
|
|
help='repository/archive to recreate')
|
|
|
|
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
|
|
|
|
help='paths to recreate; patterns are supported')
|
|
|
|
|
2016-04-27 23:28:43 +00:00
|
|
|
with_lock_epilog = textwrap.dedent("""
|
|
|
|
This command runs a user-specified command while the repository lock is held.
|
|
|
|
|
|
|
|
It will first try to acquire the lock (make sure that no other operation is
|
|
|
|
running in the repo), then execute the given command as a subprocess and wait
|
|
|
|
for its termination, release the lock and return the user command's return
|
|
|
|
code as borg's return code.
|
|
|
|
|
|
|
|
Note: if you copy a repository with the lock held, the lock will be present in
|
|
|
|
the copy, obviously. Thus, before using borg on the copy, you need to
|
|
|
|
use "borg break-lock" on it.
|
|
|
|
""")
|
|
|
|
subparser = subparsers.add_parser('with-lock', parents=[common_parser], add_help=False,
|
|
|
|
description=self.do_with_lock.__doc__,
|
|
|
|
epilog=with_lock_epilog,
|
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='run user command with lock held')
|
|
|
|
subparser.set_defaults(func=self.do_with_lock)
|
|
|
|
subparser.add_argument('location', metavar='REPOSITORY',
|
|
|
|
type=location_validator(archive=False),
|
|
|
|
help='repository to lock')
|
|
|
|
subparser.add_argument('command', metavar='COMMAND',
|
|
|
|
help='command to run')
|
|
|
|
subparser.add_argument('args', metavar='ARGS', nargs=argparse.REMAINDER,
|
|
|
|
help='command arguments')
|
|
|
|
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('help', parents=[common_parser], add_help=False,
|
2014-02-08 14:44:31 +00:00
|
|
|
description='Extra help')
|
2014-04-06 13:16:25 +00:00
|
|
|
subparser.add_argument('--epilog-only', dest='epilog_only',
|
|
|
|
action='store_true', default=False)
|
|
|
|
subparser.add_argument('--usage-only', dest='usage_only',
|
|
|
|
action='store_true', default=False)
|
|
|
|
subparser.set_defaults(func=functools.partial(self.do_help, parser, subparsers.choices))
|
2014-03-21 21:12:15 +00:00
|
|
|
subparser.add_argument('topic', metavar='TOPIC', type=str, nargs='?',
|
2014-02-08 14:44:31 +00:00
|
|
|
help='additional help on TOPIC')
|
2015-11-03 21:51:59 +00:00
|
|
|
|
2016-08-15 17:20:51 +00:00
|
|
|
debug_info_epilog = textwrap.dedent("""
|
|
|
|
This command displays some system information that might be useful for bug
|
|
|
|
reports and debugging problems. If a traceback happens, this information is
|
|
|
|
already appended at the end of the traceback.
|
|
|
|
""")
|
2016-08-19 22:04:55 +00:00
|
|
|
subparser = subparsers.add_parser('debug-info', parents=[common_parser], add_help=False,
|
2016-08-15 17:20:51 +00:00
|
|
|
description=self.do_debug_info.__doc__,
|
|
|
|
epilog=debug_info_epilog,
|
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='show system infos for debugging / bug reports (debug)')
|
|
|
|
subparser.set_defaults(func=self.do_debug_info)
|
|
|
|
|
2015-11-03 21:51:59 +00:00
|
|
|
debug_dump_archive_items_epilog = textwrap.dedent("""
|
|
|
|
This command dumps raw (but decrypted and decompressed) archive items (only metadata) to files.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('debug-dump-archive-items', parents=[common_parser], add_help=False,
|
2015-11-03 21:51:59 +00:00
|
|
|
description=self.do_debug_dump_archive_items.__doc__,
|
|
|
|
epilog=debug_dump_archive_items_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='dump archive items (metadata) (debug)')
|
2015-11-03 21:51:59 +00:00
|
|
|
subparser.set_defaults(func=self.do_debug_dump_archive_items)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='ARCHIVE',
|
2015-11-03 21:51:59 +00:00
|
|
|
type=location_validator(archive=True),
|
|
|
|
help='archive to dump')
|
2015-11-04 00:05:21 +00:00
|
|
|
|
2016-08-04 12:45:53 +00:00
|
|
|
debug_dump_repo_objs_epilog = textwrap.dedent("""
|
|
|
|
This command dumps raw (but decrypted and decompressed) repo objects to files.
|
|
|
|
""")
|
2016-08-05 23:24:22 +00:00
|
|
|
subparser = subparsers.add_parser('debug-dump-repo-objs', parents=[common_parser], add_help=False,
|
2016-08-04 12:45:53 +00:00
|
|
|
description=self.do_debug_dump_repo_objs.__doc__,
|
|
|
|
epilog=debug_dump_repo_objs_epilog,
|
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='dump repo objects (debug)')
|
|
|
|
subparser.set_defaults(func=self.do_debug_dump_repo_objs)
|
|
|
|
subparser.add_argument('location', metavar='REPOSITORY',
|
|
|
|
type=location_validator(archive=False),
|
|
|
|
help='repo to dump')
|
|
|
|
|
2015-11-06 16:45:30 +00:00
|
|
|
debug_get_obj_epilog = textwrap.dedent("""
|
|
|
|
This command gets an object from the repository.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('debug-get-obj', parents=[common_parser], add_help=False,
|
2015-11-06 16:45:30 +00:00
|
|
|
description=self.do_debug_get_obj.__doc__,
|
|
|
|
epilog=debug_get_obj_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='get object from repository (debug)')
|
2015-11-06 16:45:30 +00:00
|
|
|
subparser.set_defaults(func=self.do_debug_get_obj)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2015-11-06 16:45:30 +00:00
|
|
|
type=location_validator(archive=False),
|
|
|
|
help='repository to use')
|
|
|
|
subparser.add_argument('id', metavar='ID', type=str,
|
|
|
|
help='hex object ID to get from the repo')
|
|
|
|
subparser.add_argument('path', metavar='PATH', type=str,
|
|
|
|
help='file to write object data into')
|
|
|
|
|
2015-11-06 16:31:05 +00:00
|
|
|
debug_put_obj_epilog = textwrap.dedent("""
|
|
|
|
This command puts objects into the repository.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('debug-put-obj', parents=[common_parser], add_help=False,
|
2015-11-06 16:31:05 +00:00
|
|
|
description=self.do_debug_put_obj.__doc__,
|
|
|
|
epilog=debug_put_obj_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='put object to repository (debug)')
|
2015-11-06 16:31:05 +00:00
|
|
|
subparser.set_defaults(func=self.do_debug_put_obj)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2015-11-06 16:31:05 +00:00
|
|
|
type=location_validator(archive=False),
|
|
|
|
help='repository to use')
|
|
|
|
subparser.add_argument('paths', metavar='PATH', nargs='+', type=str,
|
|
|
|
help='file(s) to read and create object(s) from')
|
|
|
|
|
2015-11-04 00:05:21 +00:00
|
|
|
debug_delete_obj_epilog = textwrap.dedent("""
|
|
|
|
This command deletes objects from the repository.
|
|
|
|
""")
|
2016-04-09 23:28:18 +00:00
|
|
|
subparser = subparsers.add_parser('debug-delete-obj', parents=[common_parser], add_help=False,
|
2015-11-04 00:05:21 +00:00
|
|
|
description=self.do_debug_delete_obj.__doc__,
|
|
|
|
epilog=debug_delete_obj_epilog,
|
2016-01-25 16:40:52 +00:00
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
|
help='delete object from repository (debug)')
|
2015-11-04 00:05:21 +00:00
|
|
|
subparser.set_defaults(func=self.do_debug_delete_obj)
|
2015-12-12 12:50:24 +00:00
|
|
|
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
|
2015-11-04 00:05:21 +00:00
|
|
|
type=location_validator(archive=False),
|
|
|
|
help='repository to use')
|
|
|
|
subparser.add_argument('ids', metavar='IDs', nargs='+', type=str,
|
|
|
|
help='hex object ID(s) to delete from the repo')
|
2015-10-08 01:07:12 +00:00
|
|
|
return parser
|
|
|
|
|
2016-01-28 20:59:24 +00:00
|
|
|
def get_args(self, argv, cmd):
    """usually, just returns argv, except if we deal with a ssh forced command for borg serve."""
    forced = self.parse_args(argv[1:])
    if cmd is None or forced.func != self.do_serve:
        # no SSH forced command involved - use the arguments as given
        return forced
    # there is a SSH forced command ("borg serve ..." from authorized_keys) and the
    # client sent its own command line via SSH_ORIGINAL_COMMAND - parse that one, too
    client_argv = shlex.split(cmd)
    client_result = self.parse_args(client_argv[1:])
    if client_result.func != forced.func:
        # someone is trying to execute a different borg subcommand, don't do that!
        return forced
    # we only take specific options from the forced "borg serve" command:
    client_result.restrict_to_paths = forced.restrict_to_paths
    client_result.append_only = forced.append_only
    return client_result
|
|
|
|
|
2015-10-31 23:40:32 +00:00
|
|
|
def parse_args(self, args=None):
    """parse the given argument list; an empty/None list shows the help output instead"""
    # We can't use argparse for "serve" since we don't want it to show up in "Available commands"
    if args:
        args = self.preprocess_args(args)
    parsed = self.parser.parse_args(args if args else ['-h'])
    update_excludes(parsed)
    return parsed
|
|
|
|
|
2016-04-27 22:06:19 +00:00
|
|
|
def prerun_checks(self, logger):
    """Run sanity checks right before executing a subcommand, failing fast on a broken installation."""
    # NOTE(review): both helpers are defined elsewhere in this file/package; the order
    # (extension check first, then the self-test) is preserved as found.
    check_extension_modules()  # presumably verifies compiled extension modules match this version — confirm in its definition
    selftest(logger)  # built-in self-test; logs via the passed-in logger
|
|
|
|
|
Print implied output without --info/-v
There are persistent questions why output from options like --list
and --stats doesn't show up. Also, borg currently isn't able to
show *just* the output for a given option (--list, --stats,
--show-rc, --show-version, or --progress), without other INFO level
messages.
The solution is to use more granular loggers, so that messages
specific to a given option go to a logger designated for that
option. That option-specific logger can then be configured
separately from the regular loggers.
Those option-specific loggers can also be used as a hook in a
BORG_LOGGING_CONF config file to log the --list output to a separate
file, or send --stats output to a network socket where some daemon
could analyze it.
Steps:
- create an option-specific logger for each of the implied output options
- modify the messages specific to each option to go to the correct logger
- if an implied output option is passed, change the option-specific
logger (only) to log at INFO level
- test that root logger messages don't come through option-specific loggers
They shouldn't, per https://docs.python.org/3/howto/logging.html#logging-flow
but test just the same. Particularly test a message that can come from
remote repositories.
Fixes #526, #573, #665, #824
2016-05-18 02:59:58 +00:00
|
|
|
def _setup_implied_logging(self, args):
|
|
|
|
""" turn on INFO level logging for args that imply that they will produce output """
|
|
|
|
# map of option name to name of logger for that option
|
|
|
|
option_logger = {
|
|
|
|
'output_list': 'borg.output.list',
|
|
|
|
'show_version': 'borg.output.show-version',
|
|
|
|
'show_rc': 'borg.output.show-rc',
|
|
|
|
'stats': 'borg.output.stats',
|
|
|
|
'progress': 'borg.output.progress',
|
|
|
|
}
|
|
|
|
for option, logger_name in option_logger.items():
|
|
|
|
if args.get(option, False):
|
|
|
|
logging.getLogger(logger_name).setLevel('INFO')
|
|
|
|
|
2016-08-06 20:37:44 +00:00
|
|
|
def _setup_topic_debugging(self, args):
    """Turn on DEBUG level logging for specified --debug-topics."""
    for requested in args.debug_topics:
        # bare topic names (no dot) are interpreted relative to the borg.debug namespace
        full_topic = requested if '.' in requested else 'borg.debug.' + requested
        logger.debug('Enabling debug topic %s', full_topic)
        logging.getLogger(full_topic).setLevel('DEBUG')
|
|
|
|
|
2015-10-31 23:40:32 +00:00
|
|
|
def run(self, args):
    """Set up process-wide state (umask, logging) and dispatch to the selected subcommand.

    Returns whatever the subcommand handler (args.func) returns — its exit code.
    """
    os.umask(args.umask)  # early, before opening files
    self.lock_wait = args.lock_wait
    setup_logging(level=args.log_level, is_serve=args.func == self.do_serve)  # do not use loggers before this!
    # raise option-specific loggers to INFO for options that imply output (--stats, --list, ...)
    self._setup_implied_logging(vars(args))
    # raise requested --debug-topics loggers to DEBUG
    self._setup_topic_debugging(args)
    if args.show_version:
        logging.getLogger('borg.output.show-version').info('borgbackup version %s' % __version__)
    self.prerun_checks(logger)
    if is_slow_msgpack():
        logger.warning("Using a pure-python msgpack! This will result in lower performance.")
    # args.func is the do_XXX handler bound via subparser.set_defaults(func=...)
    return args.func(args)
|
2010-03-06 17:25:35 +00:00
|
|
|
|
2011-10-29 15:01:07 +00:00
|
|
|
|
2015-08-12 02:09:36 +00:00
|
|
|
def sig_info_handler(signum, stack):  # pragma: no cover
    """search the stack for infos about the currently processed file and print them"""
    # walk outward from the interrupted frame until we find a create or extract frame
    for frame_info in inspect.getouterframes(stack):
        func_name = frame_info[3]
        local_vars = frame_info[0].f_locals
        if func_name in ('process_file', '_process', ):  # create op
            path = local_vars['path']
            try:
                # fd/st may not exist (yet) in that frame - fall back to zeros
                pos = local_vars['fd'].tell()
                total = local_vars['st'].st_size
            except Exception:
                pos, total = 0, 0
            logger.info("{0} {1}/{2}".format(path, format_file_size(pos), format_file_size(total)))
            break
        elif func_name in ('extract_item', ):  # extract op
            path = local_vars['item'].path
            try:
                pos = local_vars['fd'].tell()
            except Exception:
                pos = 0
            logger.info("{0} {1}/???".format(path, format_file_size(pos)))
            break
|
|
|
|
|
|
|
|
|
2016-05-17 16:57:53 +00:00
|
|
|
class SIGTERMReceived(BaseException):
    """Raised by sig_term_handler when SIGTERM arrives.

    Derives from BaseException (not Exception) so that broad ``except Exception``
    handlers do not swallow it; it is caught explicitly at the top level in main().
    """
    pass
|
|
|
|
|
|
|
|
|
|
|
|
def sig_term_handler(signum, stack):
    """SIGTERM handler (installed by setup_signal_handlers): turn the signal into an exception."""
    raise SIGTERMReceived
|
|
|
|
|
|
|
|
|
2015-08-12 02:09:36 +00:00
|
|
|
def setup_signal_handlers():  # pragma: no cover
    """install borg's signal handlers (some signals do not exist on all platforms)"""
    # info handler: SIGUSR1 (kill -USR1 pid) and, where available, SIGINFO (kill -INFO pid / ctrl-t)
    for sig_name in ('SIGUSR1', 'SIGINFO'):
        signum = getattr(signal, sig_name, None)
        if signum is not None:
            signal.signal(signum, sig_info_handler)
    signal.signal(signal.SIGTERM, sig_term_handler)
|
2015-05-14 14:46:44 +00:00
|
|
|
|
|
|
|
|
2015-08-12 02:09:36 +00:00
|
|
|
def main():  # pragma: no cover
    """Program entry point: parse args, run the subcommand, map all outcomes to an exit code."""
    # provide 'borg mount' behaviour when the main script/executable is named borgfs
    if os.path.basename(sys.argv[0]) == "borgfs":
        sys.argv.insert(1, "mount")

    # Make sure stdout and stderr have errors='replace' to avoid unicode
    # issues when print()-ing unicode file names
    sys.stdout = ErrorIgnoringTextIOWrapper(sys.stdout.buffer, sys.stdout.encoding, 'replace', line_buffering=True)
    sys.stderr = ErrorIgnoringTextIOWrapper(sys.stderr.buffer, sys.stderr.encoding, 'replace', line_buffering=True)
    setup_signal_handlers()
    archiver = Archiver()
    msg = None
    try:
        # SSH_ORIGINAL_COMMAND is set by sshd when a forced command is configured
        args = archiver.get_args(sys.argv, os.environ.get('SSH_ORIGINAL_COMMAND'))
    except Error as e:
        msg = e.get_message()
        if e.traceback:
            msg += "\n%s\n%s" % (traceback.format_exc(), sysinfo())
        # we might not have logging setup yet, so get out quickly
        print(msg, file=sys.stderr)
        sys.exit(e.exit_code)
    try:
        exit_code = archiver.run(args)
    except Error as e:
        msg = e.get_message()
        if e.traceback:
            msg += "\n%s\n%s" % (traceback.format_exc(), sysinfo())
        exit_code = e.exit_code
    except RemoteRepository.RPCError as e:
        msg = '%s\n%s' % (str(e), sysinfo())
        exit_code = EXIT_ERROR
    except Exception:
        msg = 'Local Exception.\n%s\n%s' % (traceback.format_exc(), sysinfo())
        exit_code = EXIT_ERROR
    # the following two derive from BaseException, so the broad handler above does not catch them
    except KeyboardInterrupt:
        msg = 'Keyboard interrupt.\n%s\n%s' % (traceback.format_exc(), sysinfo())
        exit_code = EXIT_ERROR
    except SIGTERMReceived:
        msg = 'Received SIGTERM.'
        exit_code = EXIT_ERROR
    if msg:
        logger.error(msg)
    if args.show_rc:
        # --show-rc output goes to its own option-specific logger
        rc_logger = logging.getLogger('borg.output.show-rc')
        exit_msg = 'terminating with %s status, rc %d'
        if exit_code == EXIT_SUCCESS:
            rc_logger.info(exit_msg % ('success', exit_code))
        elif exit_code == EXIT_WARNING:
            rc_logger.warning(exit_msg % ('warning', exit_code))
        elif exit_code == EXIT_ERROR:
            rc_logger.error(exit_msg % ('error', exit_code))
        else:
            # unexpected rc value; 'or 666' makes a None/0 anomaly visible in the message
            rc_logger.error(exit_msg % ('abnormal', exit_code or 666))
    sys.exit(exit_code)
|
2010-02-20 17:23:46 +00:00
|
|
|
|
2015-10-20 21:57:56 +00:00
|
|
|
|
2010-02-20 17:23:46 +00:00
|
|
|
# script entry point: only run main() when executed directly, not on import
if __name__ == '__main__':
    main()
|