
comment out unimplemented args

Nick Sweeting committed 6 years ago
commit eb2b6978c3
2 changed files with 16 additions and 17 deletions
  1. archivebox/cli/archivebox_add.py (+12 -14)
  2. archivebox/cli/archivebox_help.py (+4 -3)

+ 12 - 14
archivebox/cli/archivebox_add.py

@@ -4,7 +4,6 @@ __package__ = 'archivebox.cli'
 __command__ = 'archivebox add'
 __description__ = 'Add a new URL or list of URLs to your archive'
 
-import os
 import sys
 import argparse
 
@@ -34,17 +33,17 @@ def main(args=None):
         action='store_true',
         help="Don't attempt to retry previously skipped/failed links when updating",
     )
-    parser.add_argument(
-        '--mirror', #'-m',
-        action='store_true',
-        help='Archive an entire site (finding all linked pages below it on the same domain)',
-    )
-    parser.add_argument(
-        '--crawler', #'-r',
-        choices=('depth_first', 'breadth_first'),
-        help='Controls which crawler to use in order to find outlinks in a given page',
-        default=None,
-    )
+    # parser.add_argument(
+    #     '--mirror', #'-m',
+    #     action='store_true',
+    #     help='Archive an entire site (finding all linked pages below it on the same domain)',
+    # )
+    # parser.add_argument(
+    #     '--crawler', #'-r',
+    #     choices=('depth_first', 'breadth_first'),
+    #     help='Controls which crawler to use in order to find outlinks in a given page',
+    #     default=None,
+    # )
     parser.add_argument(
         'url',
         nargs='?',
@@ -55,7 +54,7 @@ def main(args=None):
     command = parser.parse_args(args)
 
     ### Handle ingesting urls piped in through stdin
-    # (.e.g if user does cat example_urls.txt | ./archive)
+    # (e.g. if user does cat example_urls.txt | archivebox add)
     import_path = None
     if not sys.stdin.isatty():
         stdin_raw_text = sys.stdin.read()
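
For reference, a minimal standalone sketch of the stdin-detection pattern this hunk touches; everything outside the isatty() check is illustrative, not from the commit:

    import sys

    # Only read from stdin when input is actually piped in (not an
    # interactive terminal), so a bare invocation doesn't block
    # waiting for input that will never arrive.
    if not sys.stdin.isatty():
        stdin_raw_text = sys.stdin.read()
        print(f'got {len(stdin_raw_text)} chars of piped input')
    else:
        print('no piped input; falling back to CLI args')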
@@ -73,7 +72,6 @@ def main(args=None):
     elif command.url:
         import_path = handle_file_import(command.url)
 
-
     update_archive_data(
         import_path=import_path,
         resume=None,
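
Taken together, a rough sketch of the parser this file builds after the change; the --only-new flag name and the prog string are assumptions, since only the option's body appears in the hunks above:

    import argparse

    # Sketch only: --mirror/--crawler are commented out until crawling is
    # implemented; the url positional stays optional so that input can
    # also arrive via stdin.
    parser = argparse.ArgumentParser(prog='archivebox add')  # assumed prog
    parser.add_argument(
        '--only-new',  # assumed flag name; only its body is shown above
        action='store_true',
        help="Don't attempt to retry previously skipped/failed links when updating",
    )
    parser.add_argument('url', nargs='?')

    command = parser.parse_args(['--only-new', 'https://example.com'])
    print(command.url, command.only_new)  # -> https://example.com True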

+ 4 - 3
archivebox/cli/archivebox_help.py

@@ -39,11 +39,12 @@ Example Use:
     mkdir my-archive; cd my-archive/
     archivebox init
 
-    echo 'https://example.com/some/page' | archivebox add
-    archivebox add https://example.com/some/other/page
+    archivebox add https://example.com/some/page
     archivebox add --depth=1 ~/Downloads/bookmarks_export.html
-    archivebox add --depth=1 https://example.com/feed.rss
+
+    archivebox subscribe https://example.com/some/feed.rss
     archivebox update --resume=15109948213.123
+    archivebox list --sort=timestamp --csv=timestamp,url,is_archived
 
 Documentation:
     https://github.com/pirate/ArchiveBox/wiki