feed 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495
  1. #!/usr/bin/env python3
  2. """
  3. Feed parser with many features.
  4. It collects audio entries from a feed and supports filtering, subslicing,
  5. and random picking. Besides feeds, it supports picking files from directories.
  6. """
  7. import datetime
  8. import logging
  9. import os
  10. import posixpath
  11. import random
  12. import re
  13. import urllib.request
  14. from argparse import ArgumentParser, ArgumentTypeError
  15. from bisect import bisect
  16. from collections import OrderedDict
  17. from subprocess import CalledProcessError, check_output
  18. from urllib.parse import unquote, urlparse
  19. import requests
  20. from lxml import html
  21. from pytimeparse.timeparse import timeparse
  22. def get_int(s):
  23. return int(re.findall(r"\d+", s)[0])
  24. def DurationType(arg):
  25. if arg.isdecimal():
  26. secs = int(arg)
  27. else:
  28. secs = timeparse(arg)
  29. if secs is None:
  30. raise ArgumentTypeError("%r is not a valid duration" % arg)
  31. return secs
  32. def TimeDeltaType(arg):
  33. if arg.isdecimal():
  34. secs = int(arg)
  35. else:
  36. secs = timeparse(arg)
  37. if secs is None:
  38. raise ArgumentTypeError("%r is not a valid time range" % arg)
  39. return datetime.timedelta(seconds=secs)
  40. def weighted_choice(values, weights):
  41. """
  42. random.choice with weights
  43. weights must be integers greater than 0.
  44. Their meaning is "relative", that is [1,2,3] is the same as [2,4,6]
  45. """
  46. assert len(values) == len(weights)
  47. total = 0
  48. cum_weights = []
  49. for w in weights:
  50. total += w
  51. cum_weights.append(total)
  52. x = random.random() * total
  53. i = bisect(cum_weights, x)
  54. return values[i]
  55. def delta_humanreadable(tdelta):
  56. if tdelta is None:
  57. return ""
  58. days = tdelta.days
  59. hours = (tdelta - datetime.timedelta(days=days)).seconds // 3600
  60. if days:
  61. return "{}d{}h".format(days, hours)
  62. return "{}h".format(hours)
  63. class Audio(object):
  64. def __init__(self, url, duration=None, date=None):
  65. self.url = url
  66. if duration is None:
  67. duration = get_duration(url.encode("utf-8"))
  68. self.duration = duration
  69. self.date = date
  70. self.end_date = datetime.datetime(9999, 12, 31, tzinfo=datetime.timezone.utc)
  71. def __str__(self):
  72. return self.url
  73. def __repr__(self):
  74. return "<Audio {} ({} {})>".format(
  75. self.url, self.duration, delta_humanreadable(self.age)
  76. )
  77. @property
  78. def urls(self):
  79. return [self.url]
  80. @property
  81. def age(self):
  82. if self.date is None:
  83. return None
  84. now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
  85. return now - self.date
  86. @property
  87. def valid(self):
  88. return self.end_date >= datetime.datetime.utcnow().replace(
  89. tzinfo=datetime.timezone.utc
  90. )
  91. class AudioGroup(list):
  92. def __init__(self, description=None):
  93. self.description = description or ""
  94. self.audios = []
  95. def __len__(self):
  96. return len(self.audios)
  97. def append(self, arg):
  98. self.audios.append(arg)
  99. def __str__(self):
  100. return "\n".join(str(a) for a in self.audios)
  101. def __repr__(self):
  102. return '<AudioGroup "{}" ({} {})\n{} >'.format(
  103. self.description,
  104. self.duration,
  105. delta_humanreadable(self.age),
  106. "\n".join(" " + repr(a) for a in self.audios),
  107. )
  108. @property
  109. def duration(self):
  110. return sum(a.duration for a in self.audios if a.duration is not None)
  111. @property
  112. def urls(self):
  113. return [a.url for a in self.audios]
  114. @property
  115. def date(self):
  116. for a in self.audios:
  117. if hasattr(a, "date"):
  118. return a.date
  119. return None
  120. @property
  121. def age(self):
  122. if self.date is None:
  123. return None
  124. now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
  125. return now - self.date
  126. @property
  127. def valid(self):
  128. return len(self.audios) > 0
  129. def get_tree(feed_url):
  130. if feed_url.startswith("http:") or feed_url.startswith("https:"):
  131. tree = html.fromstring(requests.get(feed_url).content)
  132. else:
  133. if not os.path.exists(feed_url):
  134. raise ValueError("file not found: {}".format(feed_url))
  135. tree = html.parse(open(feed_url))
  136. return tree
  137. def get_audio_from_description(text):
  138. # non-empty lines
  139. lines = [line.strip() for line in text.split("\n") if line.strip()]
  140. url = lines[0]
  141. duration = None
  142. metadata = {}
  143. for line in text.split("\n")[1:]:
  144. if line.strip() and "=" in line:
  145. metadata[line.split("=")[0]] = line.split("=")[1]
  146. if "durata" in metadata:
  147. metadata["durata"] = get_int(metadata["durata"])
  148. if "txdate" in metadata:
  149. try:
  150. metadata["txdate"] = datetime.datetime.strptime(
  151. metadata["txdate"], "%Y-%m-%dT%H:%M:%S%z"
  152. )
  153. except ValueError:
  154. logging.warning("could not parse txdate %s", metadata["txdate"])
  155. del metadata["txdate"]
  156. a = Audio(
  157. unquote(url),
  158. duration=metadata.get("durata", None),
  159. date=metadata.get("txdate", None),
  160. )
  161. if "txdate" in metadata and "replica" in metadata:
  162. if metadata["replica"].endswith("g"):
  163. a.end_date = metadata["txdate"] + datetime.timedelta(
  164. days=get_int(metadata["replica"])
  165. )
  166. return a
  167. # copied from larigira.fsutils
  168. def scan_dir_audio(dirname, extensions=("mp3", "oga", "wav", "ogg")):
  169. for root, dirnames, filenames in os.walk(dirname):
  170. for fname in filenames:
  171. if fname.split(".")[-1].lower() in extensions:
  172. yield os.path.join(root, fname)
  173. def get_audio_from_dir(dirpath):
  174. fpaths = scan_dir_audio(dirpath)
  175. return [
  176. Audio(
  177. "file://" + os.path.realpath(u),
  178. date=datetime.datetime.fromtimestamp(os.path.getmtime(u)).replace(
  179. tzinfo=datetime.timezone.utc
  180. ),
  181. )
  182. for u in fpaths
  183. ]
  184. def get_item_date(el):
  185. el_date = el.find("pubdate")
  186. if el_date is not None:
  187. return datetime.datetime.strptime(el_date.text, "%Y-%m-%dT%H:%M:%S%z")
  188. return None
  189. def get_urls(tree):
  190. items = tree.xpath("//item")
  191. for it in items:
  192. title = it.find("title").text
  193. el_body = it.find("description")
  194. if el_body is not None:
  195. url = el_body.text
  196. try:
  197. audio = get_audio_from_description(url)
  198. except Exception as exc:
  199. logging.info("error getting duration for `%s`" % title)
  200. continue
  201. if audio.date is None:
  202. audio.date = get_item_date(it)
  203. yield audio
  204. def get_grouped_urls(tree):
  205. groups = OrderedDict()
  206. items = tree.xpath("//item")
  207. for item in items:
  208. guid = item.xpath("guid")[0].text.strip()
  209. if guid not in groups:
  210. groups[guid] = AudioGroup(guid)
  211. audio = get_audio_from_description(item.xpath("description")[0].text)
  212. audio.date = get_item_date(item)
  213. if audio.valid:
  214. groups[guid].append(audio)
  215. return groups
  216. def get_duration(url):
  217. try:
  218. lineout = check_output(
  219. ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-i", url]
  220. ).split(b"\n")
  221. except CalledProcessError as exc:
  222. raise ValueError("error probing `%s`" % url) from exc
  223. duration = next(l for l in lineout if l.startswith(b"duration="))
  224. value = duration.split(b"=")[1]
  225. return int(float(value))
# CLI help text used by get_parser(); the trailing "Usage: " leads into the
# usage line that argparse generates
HELP = """
Collect audio informations from multiple sources (XML feeds).
Audios are (in that order):
1. Collected from feeds; (grouped by article if --group is used)
2. Filtered; everything that does not match with requirements is excluded
3. Sorted; even randomly
4. Sliced; take HOWMANY elements, skipping START elements
5. (if --copy) Copied
Usage: """
  235. def get_parser():
  236. p = ArgumentParser(HELP)
  237. src = p.add_argument_group("sources", "How to deal with sources")
  238. src.add_argument(
  239. "--source-weights", help='Select only one "source" based on this weights'
  240. )
  241. src.add_argument(
  242. "--group",
  243. default=False,
  244. action="store_true",
  245. help="Group audios that belong to the same article",
  246. )
  247. filters = p.add_argument_group(
  248. "filters", "Select only items that match " "these conditions"
  249. )
  250. filters.add_argument(
  251. "--min-len",
  252. default=0,
  253. type=DurationType,
  254. help="Exclude any audio that is shorter " "than MIN_LEN seconds",
  255. )
  256. filters.add_argument(
  257. "--max-len",
  258. default=0,
  259. type=DurationType,
  260. help="Exclude any audio that is longer " "than MAX_LEN seconds",
  261. )
  262. filters.add_argument(
  263. "--sort-by", default="no", type=str, choices=("random", "date", "duration")
  264. )
  265. filters.add_argument(
  266. "--reverse", default=False, action="store_true", help="Reverse list order"
  267. )
  268. filters.add_argument(
  269. "--min-age",
  270. default=datetime.timedelta(),
  271. type=TimeDeltaType,
  272. help="Exclude audio more recent than MIN_AGE",
  273. )
  274. filters.add_argument(
  275. "--max-age",
  276. default=datetime.timedelta(),
  277. type=TimeDeltaType,
  278. help="Exclude audio older than MAX_AGE",
  279. )
  280. p.add_argument(
  281. "--start",
  282. default=0,
  283. type=int,
  284. help="0-indexed start number. " "By default, play from most recent",
  285. )
  286. p.add_argument(
  287. "--howmany", default=1, type=int, help="If not specified, only 1 will be played"
  288. )
  289. p.add_argument(
  290. "--slotsize", type=int, help="Seconds between each audio. Still unsupported"
  291. )
  292. general = p.add_argument_group("general", "General options")
  293. general.add_argument(
  294. "--copy", help="Copy files to $TMPDIR", default=False, action="store_true"
  295. )
  296. general.add_argument(
  297. "--debug", help="Debug messages", default=False, action="store_true"
  298. )
  299. p.add_argument("urls", metavar="URL", nargs="+")
  300. return p
  301. def put(audio, copy=False):
  302. if not copy:
  303. for url in audio.urls:
  304. print(url)
  305. else:
  306. for url in audio.urls:
  307. if url.split(":")[0] in ("http", "https"):
  308. destdir = os.environ.get("TMPDIR", ".")
  309. fname = posixpath.basename(urlparse(url).path)
  310. # sanitize
  311. fname = "".join(
  312. c for c in fname if c.isalnum() or c in list("._-")
  313. ).rstrip()
  314. dest = os.path.join(destdir, fname)
  315. os.makedirs(destdir, exist_ok=True)
  316. fname, headers = urllib.request.urlretrieve(url, dest)
  317. print("file://%s" % os.path.realpath(fname))
  318. else:
  319. # FIXME: file:// urls are just copied
  320. print(url)
  321. def retrieve(url, args):
  322. """
  323. returns a list of Audios or a list of AudioGroups
  324. """
  325. if not args.group:
  326. if os.path.isdir(url):
  327. audiodir = get_audio_from_dir(url)
  328. return audiodir
  329. elif url.startswith("http:") or url.startswith("https:") or os.path.isfile(url):
  330. return get_urls(get_tree(url))
  331. else:
  332. logging.info("unsupported url `%s`", url)
  333. return []
  334. else: # group
  335. if os.path.isdir(url):
  336. audiodir = get_audio_from_dir(url)
  337. agroups = []
  338. for a in audiodir:
  339. ag = AudioGroup(os.path.basename(a.url))
  340. ag.append(a)
  341. agroups.append(ag)
  342. return agroups
  343. elif url.startswith("http:") or url.startswith("https:") or os.path.isfile(url):
  344. groups = get_grouped_urls(get_tree(url))
  345. return groups.values()
  346. else:
  347. logging.info("unsupported url `%s`", url)
  348. return []
  349. def audio_passes_filters(audio, args):
  350. if not audio.valid:
  351. return False
  352. if args.max_len and audio.duration > args.max_len:
  353. return False
  354. if args.min_len and audio.duration < args.min_len:
  355. return False
  356. if args.min_age.total_seconds() and audio.age < args.min_age:
  357. return False
  358. if args.max_age.total_seconds() and audio.age > args.max_age:
  359. return False
  360. return True
def main():
    """Entry point: parse args, collect, filter, sort, slice and emit audios."""
    parser = get_parser()
    args = parser.parse_args()
    if not args.debug:
        logging.basicConfig(level=logging.WARNING)
    else:
        logging.basicConfig(level=logging.DEBUG)
    sources = args.urls
    if args.source_weights:
        # pick exactly one source, weighted by the colon-separated integers
        weights = tuple(map(int, args.source_weights.split(":")))
        if len(weights) != len(sources):
            parser.exit(
                status=2, message="Weight must be in the" " same number as sources\n"
            )
        sources = [weighted_choice(sources, weights)]
    audios = []
    for url in sources:
        url_audios = retrieve(url, args)
        audios += [au for au in url_audios if audio_passes_filters(au, args)]
    # sort
    if args.sort_by == "random":
        random.shuffle(audios)
    elif args.sort_by == "date":
        audios.sort(key=lambda x: x.age)
    elif args.sort_by == "duration":
        audios.sort(key=lambda x: x.duration)
    if args.reverse:
        audios.reverse()
    # slice
    audios = audios[args.start :]
    audios = audios[: args.howmany]
    # the for loop excludes the last one
    # this is to support the --slotsize option
    if not audios:
        return
    for audio in audios[:-1]:
        if args.debug:
            print(repr(audio))
        else:
            put(audio, args.copy)
        if args.slotsize is not None:
            duration = audio.duration
            if duration < args.slotsize:
                # announce how much filler music is needed to complete the slot
                print("## musica per {} secondi".format(args.slotsize - duration))
    # finally, the last one
    if args.debug:
        print(repr(audios[-1]))
    else:
        put(audios[-1], args.copy)
  410. # else: # grouping; TODO: support slotsize
  411. # for item in groups:
  412. # if args.debug:
  413. # print('#', item, groups[item].duration)
  414. # print(groups[item])
# allow importing this module without running the CLI
if __name__ == "__main__":
    main()