Mirror of https://github.com/borgbackup/borg.git, synced 2025-02-23 22:51:35 +00:00
[TASK] #4471 – borg help should print <command> list in sorted order: Step 3
Moved borg benchmark and borg break-lock to the top.
parent 2e42543d3d
commit 737d17cc3f
1 changed file with 80 additions and 80 deletions
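Background for the move: argparse renders the <command> list in the order the add_parser() calls execute, so sorting borg's help output means physically reordering the parser definitions in archiver.py. A minimal standalone sketch of that behavior (the prog name 'demo' and the command set here are illustrative only, not borg code):

# Standalone sketch: argparse shows subcommands in definition order,
# so sorting help output means reordering the add_parser() calls.
import argparse

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(title='required arguments', metavar='<command>')
for name in ('benchmark', 'break-lock', 'mount', 'prune'):  # definition order == help order
    subparsers.add_parser(name, help='%s command' % name)
parser.print_help()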
@@ -2604,6 +2604,86 @@ def define_archive_filters_group(subparser, *, sort_by=True, first_last=True):
        subparsers = parser.add_subparsers(title='required arguments', metavar='<command>')

        # borg benchmark
        benchmark_epilog = process_epilog("These commands do various benchmarks.")

        subparser = subparsers.add_parser('benchmark', parents=[mid_common_parser], add_help=False,
                                          description='benchmark command',
                                          epilog=benchmark_epilog,
                                          formatter_class=argparse.RawDescriptionHelpFormatter,
                                          help='benchmark command')

        benchmark_parsers = subparser.add_subparsers(title='required arguments', metavar='<command>')
        subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser))

        bench_crud_epilog = process_epilog("""
        This command benchmarks borg CRUD (create, read, update, delete) operations.

        It creates input data below the given PATH and backs up this data into the given REPO.
        The REPO must already exist (it could be a fresh empty repo or an existing repo); the
        command will create / read / update / delete some archives named borg-benchmark-crud\\* there.

        Make sure you have free space there, you'll need about 1GB each (+ overhead).

        If your repository is encrypted and borg needs a passphrase to unlock the key, use:

        BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH

        Measurements are done with different input file sizes and counts.
        The file contents are very artificial (either all zero or all random),
        thus the measurement results do not necessarily reflect performance with real data.
        Also, due to the kind of content used, no compression is used in these benchmarks.

        C- == borg create (1st archive creation, no compression, do not use files cache)
        C-Z- == all-zero files. Full dedup, this is primarily measuring reader/chunker/hasher.
        C-R- == random files. No dedup, measuring throughput through all processing stages.

        R- == borg extract (extract archive, dry-run, do everything, but do not write files to disk)
        R-Z- == all-zero files. Measuring heavily duplicated files.
        R-R- == random files. No duplication here, measuring throughput through all processing
                stages, except writing to disk.

        U- == borg create (2nd archive creation of unchanged input files, measure files cache speed)
        The throughput value is kind of virtual here, it does not actually read the file.
        U-Z- == needs to check the 2 all-zero chunks' existence in the repo.
        U-R- == needs to check existence of a lot of different chunks in the repo.

        D- == borg delete archive (delete last remaining archive, measure deletion + compaction)
        D-Z- == few chunks to delete / few segments to compact/remove.
        D-R- == many chunks to delete / many segments to compact/remove.

        Please note that there might be quite some variance in these measurements.
        Try multiple measurements and use an otherwise idle machine (and network, if you use it).
        """)
        subparser = benchmark_parsers.add_parser('crud', parents=[common_parser], add_help=False,
                                                 description=self.do_benchmark_crud.__doc__,
                                                 epilog=bench_crud_epilog,
                                                 formatter_class=argparse.RawDescriptionHelpFormatter,
                                                 help='benchmarks borg CRUD (create, extract, update, delete).')
        subparser.set_defaults(func=self.do_benchmark_crud)

        subparser.add_argument('location', metavar='REPO',
                               type=location_validator(archive=False),
                               help='repo to use for benchmark (must exist)')

        subparser.add_argument('path', metavar='PATH', help='path where to create benchmark input data')

        # borg break-lock
        break_lock_epilog = process_epilog("""
        This command breaks the repository and cache locks.
        Please use it carefully and only while no borg process (on any machine) is
        trying to access the Cache or the Repository.
        """)
        subparser = subparsers.add_parser('break-lock', parents=[common_parser], add_help=False,
                                          description=self.do_break_lock.__doc__,
                                          epilog=break_lock_epilog,
                                          formatter_class=argparse.RawDescriptionHelpFormatter,
                                          help='break repository and cache locks')
        subparser.set_defaults(func=self.do_break_lock)
        subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
                               type=location_validator(archive=False),
                               help='repository for which to break the locks')

        # borg mount
        mount_epilog = process_epilog("""
        This command mounts an archive as a FUSE filesystem. This can be useful for
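The moved benchmark block relies on a two-level subparser pattern: the 'benchmark' group parser owns its own nested subparsers, and set_defaults(fallback_func=...) lets a bare 'borg benchmark' fall back to printing the group's help. A minimal, self-contained sketch of that dispatch pattern, assuming a simplified handler (the names 'demo', 'do_benchmark_crud', and 'subcommand_help' are illustrative, not borg's actual dispatch code):

# Sketch of nested subparsers with a fallback help function for the
# bare group command, mirroring the pattern in the diff above.
import argparse
import functools


def do_benchmark_crud(args):
    print('would benchmark repo=%s path=%s' % (args.location, args.path))


def subcommand_help(parser, args):
    parser.print_help()


parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(title='required arguments', metavar='<command>')

benchmark = subparsers.add_parser('benchmark', help='benchmark command')
benchmark_parsers = benchmark.add_subparsers(title='required arguments', metavar='<command>')
benchmark.set_defaults(fallback_func=functools.partial(subcommand_help, benchmark))

crud = benchmark_parsers.add_parser('crud', help='benchmark CRUD operations')
crud.add_argument('location', metavar='REPO')
crud.add_argument('path', metavar='PATH')
crud.set_defaults(func=do_benchmark_crud)

args = parser.parse_args()
# 'demo benchmark' alone sets no func, so fall back to the group's help.
func = getattr(args, 'func', None) or getattr(args, 'fallback_func', None)
if func is None:
    parser.print_help()
else:
    func(args)

Running 'demo benchmark' alone prints the group's help via the fallback; 'demo benchmark crud REPO PATH' reaches the handler.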
@@ -3530,22 +3610,6 @@ def define_archive_filters_group(subparser, *, sort_by=True, first_last=True):
                               help='format output as JSON')
        define_archive_filters_group(subparser)

        # borg break-lock
        break_lock_epilog = process_epilog("""
        This command breaks the repository and cache locks.
        Please use it carefully and only while no borg process (on any machine) is
        trying to access the Cache or the Repository.
        """)
        subparser = subparsers.add_parser('break-lock', parents=[common_parser], add_help=False,
                                          description=self.do_break_lock.__doc__,
                                          epilog=break_lock_epilog,
                                          formatter_class=argparse.RawDescriptionHelpFormatter,
                                          help='break repository and cache locks')
        subparser.set_defaults(func=self.do_break_lock)
        subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
                               type=location_validator(archive=False),
                               help='repository for which to break the locks')

        # borg prune
        prune_epilog = process_epilog("""
        The prune command prunes a repository by deleting all archives not matching
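In the break-lock parser, REPOSITORY is optional (nargs='?', default='') and runs through a type= callable. A hedged sketch of how such a validator hooks into argparse; this location_validator is a stand-in, not borg's real implementation, which parses the location string into a Location object:

# Stand-in sketch of a type= validator in the style of location_validator.
# argparse calls it on the raw string and keeps its return value, turning
# an argparse.ArgumentTypeError into a clean usage error.
import argparse


def location_validator(archive=False):
    def validate(text):
        if archive and '::' not in text:
            raise argparse.ArgumentTypeError('expected REPOSITORY::ARCHIVE')
        return text  # a real validator would return a parsed object here
    return validate


parser = argparse.ArgumentParser(prog='demo-break-lock')
parser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
                    type=location_validator(archive=False),
                    help='repository for which to break the locks')
print(parser.parse_args().location)  # '' when REPOSITORY is omitted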
@@ -4128,70 +4192,6 @@ def define_archive_filters_group(subparser, *, sort_by=True, first_last=True):
        subparser.add_argument('output', metavar='OUTPUT', type=argparse.FileType('wb'),
                               help='Output file')

        # borg benchmark
        benchmark_epilog = process_epilog("These commands do various benchmarks.")

        subparser = subparsers.add_parser('benchmark', parents=[mid_common_parser], add_help=False,
                                          description='benchmark command',
                                          epilog=benchmark_epilog,
                                          formatter_class=argparse.RawDescriptionHelpFormatter,
                                          help='benchmark command')

        benchmark_parsers = subparser.add_subparsers(title='required arguments', metavar='<command>')
        subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser))

        bench_crud_epilog = process_epilog("""
        This command benchmarks borg CRUD (create, read, update, delete) operations.

        It creates input data below the given PATH and backs up this data into the given REPO.
        The REPO must already exist (it could be a fresh empty repo or an existing repo); the
        command will create / read / update / delete some archives named borg-benchmark-crud\\* there.

        Make sure you have free space there, you'll need about 1GB each (+ overhead).

        If your repository is encrypted and borg needs a passphrase to unlock the key, use:

        BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH

        Measurements are done with different input file sizes and counts.
        The file contents are very artificial (either all zero or all random),
        thus the measurement results do not necessarily reflect performance with real data.
        Also, due to the kind of content used, no compression is used in these benchmarks.

        C- == borg create (1st archive creation, no compression, do not use files cache)
        C-Z- == all-zero files. Full dedup, this is primarily measuring reader/chunker/hasher.
        C-R- == random files. No dedup, measuring throughput through all processing stages.

        R- == borg extract (extract archive, dry-run, do everything, but do not write files to disk)
        R-Z- == all-zero files. Measuring heavily duplicated files.
        R-R- == random files. No duplication here, measuring throughput through all processing
                stages, except writing to disk.

        U- == borg create (2nd archive creation of unchanged input files, measure files cache speed)
        The throughput value is kind of virtual here, it does not actually read the file.
        U-Z- == needs to check the 2 all-zero chunks' existence in the repo.
        U-R- == needs to check existence of a lot of different chunks in the repo.

        D- == borg delete archive (delete last remaining archive, measure deletion + compaction)
        D-Z- == few chunks to delete / few segments to compact/remove.
        D-R- == many chunks to delete / many segments to compact/remove.

        Please note that there might be quite some variance in these measurements.
        Try multiple measurements and use an otherwise idle machine (and network, if you use it).
        """)
        subparser = benchmark_parsers.add_parser('crud', parents=[common_parser], add_help=False,
                                                 description=self.do_benchmark_crud.__doc__,
                                                 epilog=bench_crud_epilog,
                                                 formatter_class=argparse.RawDescriptionHelpFormatter,
                                                 help='benchmarks borg CRUD (create, extract, update, delete).')
        subparser.set_defaults(func=self.do_benchmark_crud)

        subparser.add_argument('location', metavar='REPO',
                               type=location_validator(archive=False),
                               help='repo to use for benchmark (must exist)')

        subparser.add_argument('path', metavar='PATH', help='path where to create benchmark input data')

        return parser

    def get_args(self, argv, cmd):