
flake8 is happy now

boyska 3 years ago
parent commit beddb0b2d5
5 changed files with 39 additions and 35 deletions
  1. marxbook/__init__.py (+1, -1)
  2. marxbook/cli.py (+10, -10)
  3. marxbook/extract.py (+2, -2)
  4. marxbook/store.py (+23, -22)
  5. setup.cfg (+3, -0)

+ 1 - 1
marxbook/__init__.py

@@ -1 +1 @@
-from .store import Store, Serializer
+from .store import Store, Serializer  # noqa: F401
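
The only change here is the `# noqa: F401` marker, which tells flake8 to skip its "imported but unused" check on that line; that is the usual way to keep a re-export in a package `__init__.py`. A minimal sketch of the same pattern, with placeholder names (`mypkg`, `Thing`) that are not part of this repository:

    # mypkg/__init__.py
    # Re-export the public API; without the noqa comment flake8 reports F401,
    # because the imported name is never used inside this module.
    from .core import Thing  # noqa: F401

    # An explicit __all__ documents that the re-export is intentional:
    __all__ = ["Thing"]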

+ 10 - 10
marxbook/cli.py

@@ -17,7 +17,7 @@ def get_parser():
     p.set_defaults(func=None)
     subcommands = p.add_subparsers(help="Sub-commands")
     list_p = subcommands.add_parser("list")
-    list_p.add_argument("--format", default="{Path}\t{Tag}\t{Title}\t{Url}", dest='fmt')
+    list_p.add_argument("--format", default="{Path}\t{Tag}\t{Title}\t{Url}", dest="fmt")
     list_p.add_argument("folder", nargs="?", default="")
     list_p.set_defaults(func=main_list)
 
@@ -26,8 +26,8 @@ def get_parser():
     add_p.add_argument("--tag", help="Comma-separated list of tags", default="")
     add_p.add_argument("--title", help="If omitted, auto-fetch")
     add_p.add_argument("--description", help="If omitted, auto-fetch")
-    add_p.add_argument("--edit", dest='edit', action='store_true', default=None)
-    add_p.add_argument("--no-edit", dest='edit', action='store_false', default=None)
+    add_p.add_argument("--edit", dest="edit", action="store_true", default=None)
+    add_p.add_argument("--no-edit", dest="edit", action="store_false", default=None)
     add_p.add_argument("url", nargs="?")
     add_p.set_defaults(func=main_add)
 
@@ -36,7 +36,6 @@ def get_parser():
     rm_p.add_argument("path")
     rm_p.set_defaults(func=main_rm)
 
-
     return p
 
 
@@ -54,10 +53,10 @@ def main():
 
 def main_list(store, args):
     for mark in store.folder(args.folder):
-        markdata = {"Folder": os.path.dirname(mark['Path'])}
+        markdata = {"Folder": os.path.dirname(mark["Path"])}
         markdata.update(mark)
-        markdata['Tag'] = ','.join(mark['Tag'])
-        print(args.fmt.replace(r'\t', '\t').format(**markdata))
+        markdata["Tag"] = ",".join(mark["Tag"])
+        print(args.fmt.replace(r"\t", "\t").format(**markdata))
 
 
 class Edit:
@@ -82,7 +81,6 @@ class Edit:
         return data
 
 
-
 def main_add(store, args):
     store = store.folder(args.folder)
     batch = args.batch
@@ -97,9 +95,11 @@ def main_add(store, args):
 
     for url in urls:
         data = dict(title=args.title, description=args.description, url=url)
-        data['tag'] = [t.strip() for t in args.tag.split(",")]
+        data["tag"] = [t.strip() for t in args.tag.split(",")]
         if args.title is None or args.description is None:
-            _title, _description, _keys, mime, bad = marxbook.extract.network_handler(url)
+            _title, _description, _keys, mime, bad = marxbook.extract.network_handler(
+                url
+            )
             if not args.title:
                 data["title"] = _title
             if not args.description:
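
Beyond the quote normalization, the `--edit` / `--no-edit` pair above is a small argparse idiom worth noting: both options write to the same `dest` with `default=None`, so the program can tell "flag not given" (None) apart from an explicit choice (True/False). A minimal sketch of that tri-state pattern on its own, outside marxbook:

    import argparse

    # Both flags share one destination; None means the user expressed no preference.
    p = argparse.ArgumentParser()
    p.add_argument("--edit", dest="edit", action="store_true", default=None)
    p.add_argument("--no-edit", dest="edit", action="store_false", default=None)

    print(p.parse_args([]).edit)             # None  -> fall back to a default policy
    print(p.parse_args(["--edit"]).edit)     # True
    print(p.parse_args(["--no-edit"]).edit)  # False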

+ 2 - 2
marxbook/extract.py

@@ -1,8 +1,8 @@
-'''
+"""
 Extract relevant informations from URL.
 
 Most of the code comes from jarun/buku, licensed under GPLv3.
-'''
+"""
 
 
 import os

+ 23 - 22
marxbook/store.py

@@ -9,25 +9,26 @@ logger = logging.getLogger()
 
 def get_fname(url):
     m = hashlib.md5()
-    m.update(url.encode('utf8'))
+    m.update(url.encode("utf8"))
     return m.hexdigest()
 
 
 class Store:
     def __init__(self, basedir: Path = None):
         if basedir is None:
-            basedir = Path('~/.local/share/marxbook/bookmarks/').expanduser()
+            basedir = Path("~/.local/share/marxbook/bookmarks/").expanduser()
         self.basedir = basedir
         self.serializer = Serializer()
 
-    def add(self, url: str, title=None, tag=[], description=''):
+    def add(self, url: str, title=None, tag=[], description=""):
         dest = self.basedir
         dest.mkdir(parents=True, exist_ok=True)
         fname = get_fname(url)
         fpath = dest / fname
-        content = self.serializer.encode(dict(
-            url=url, title=title, tags=tag, description=description))
-        with fpath.open('w') as buf:
+        content = self.serializer.encode(
+            dict(url=url, title=title, tags=tag, description=description)
+        )
+        with fpath.open("w") as buf:
             buf.write(content)
 
     def get(self, path: str):
@@ -36,27 +37,26 @@ class Store:
             return self.serializer.decode(buf.read())
 
     def find(self, prefix_path: str) -> Path:
-        '''
+        """
         prefix_path is a special form of contraction. Let's say prefix_path=utils/time/5e
         If there is a single file starting with 5e inside utils/time, then that's found!
-        '''
+        """
         if os.path.exists(prefix_path):
             return prefix_path
-    
-        candidates = list(self.basedir.glob(prefix_path + '*'))
+
+        candidates = list(self.basedir.glob(prefix_path + "*"))
         if not candidates:
             raise FileNotFoundError("%s not found" % prefix_path)
         if len(candidates) > 1:
             raise ValueError("Ambiguous prefix %s" % prefix_path)
         return candidates[0]
-        
 
     def __iter__(self):
-        for urlfile in self.basedir.glob('**/*'):
+        for urlfile in self.basedir.glob("**/*"):
             if not urlfile.is_file():
                 continue
             data = self.get(urlfile)
-            ret = { 'Path': str(urlfile.relative_to(self.basedir)) }
+            ret = {"Path": str(urlfile.relative_to(self.basedir))}
             ret.update(data)
             yield ret
 
@@ -64,7 +64,7 @@ class Store:
         return Store(self.basedir / folder)
 
 
-HEADER_LINE = re.compile(r'^([^:]+): (.*)$')
+HEADER_LINE = re.compile(r"^([^:]+): (.*)$")
 
 
 class Serializer:
@@ -72,17 +72,17 @@ class Serializer:
         pass
 
     def encode(self, data: dict) -> str:
-        m = ''
-        tags = data.pop('tag', [])  # those are special!
+        m = ""
+        tags = data.pop("tag", [])  # those are special!
         for key in data:
-            m += '%s: %s\n' % (key.title(), str(data[key]).replace('\n', ' '))
+            m += "%s: %s\n" % (key.title(), str(data[key]).replace("\n", " "))
         for tag in tags:
-            m += '%s: %s\n' % ('Tag', tag)
+            m += "%s: %s\n" % ("Tag", tag)
         return m
 
     def decode(self, content: str) -> dict:
-        d: dict = {'Tag': []}
-        for num, line in enumerate(content.split('\n'), 1):
+        d: dict = {"Tag": []}
+        for num, line in enumerate(content.split("\n"), 1):
             if not line.strip():
                 continue
             m = HEADER_LINE.match(line)
@@ -91,15 +91,16 @@ class Serializer:
                 continue
             key, value = m.groups()
             key = key.title()
-            if key == 'Tag':
+            if key == "Tag":
                 d[key].append(value)
             else:
                 d[key] = value
         return d
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import sys
+
     s = Store()
     # print(s.get(sys.argv[1]))
     for line in s.list(sys.argv[1]):
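
The store.py changes are again mostly quoting and whitespace, but the hunks also show the on-disk format: each bookmark is a file named after the MD5 of its URL, holding "Key: Value" header lines with one "Tag:" line per tag. A rough round-trip sketch using the Serializer from this diff (the URL, title, and tags are just example data):

    from marxbook import Serializer

    s = Serializer()
    text = s.encode({"url": "https://example.com", "title": "Example", "tag": ["read", "later"]})
    # text now looks like:
    #   Url: https://example.com
    #   Title: Example
    #   Tag: read
    #   Tag: later
    print(s.decode(text)["Tag"])  # ['read', 'later']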

+ 3 - 0
setup.cfg

@@ -0,0 +1,3 @@
+[flake8]
+max-line-length=120
+ignore=E203,W503
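
E203 (whitespace before ':') and W503 (line break before a binary operator) are the two pycodestyle checks commonly silenced when code is wrapped in the Black style, and listing them explicitly matters because setting `ignore` replaces flake8's default ignore list rather than extending it; the line limit is also raised from the default 79 to 120. A small illustration of code this config accepts (variable names are made up for the example):

    # E203 is ignored, so a slice with spaces around ':' is not flagged:
    window = samples[offset + 1 : offset + width]

    # W503 is ignored, so breaking a long expression before the operator is fine:
    ready = (queue_is_open
             and pending_jobs > 0)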