feed

#!/usr/bin/env python3
'''
Feed parser with many features.

It reads audio entries from a feed and supports filtering, slicing and
random picking. Besides feeds, it also supports picking files from
directories.
'''
import os
import logging
from argparse import ArgumentParser, ArgumentTypeError
from subprocess import check_output, CalledProcessError
from collections import OrderedDict
import re
import urllib.request
from urllib.parse import urlparse, unquote
import posixpath
import random
from bisect import bisect
import datetime

from lxml import html
import requests
from pytimeparse.timeparse import timeparse


def get_int(s):
    '''Return the first integer found in a string.'''
    return int(re.findall(r'\d+', s)[0])


def DurationType(arg):
    '''argparse type for a duration: either a plain number of seconds or a
    string understood by pytimeparse (e.g. "10min").'''
    if arg.isdecimal():
        secs = int(arg)
    else:
        secs = timeparse(arg)
    if secs is None:
        raise ArgumentTypeError('%r is not a valid duration' % arg)
    return secs


def TimeDeltaType(arg):
    '''argparse type like DurationType, but returning a datetime.timedelta.'''
    if arg.isdecimal():
        secs = int(arg)
    else:
        secs = timeparse(arg)
    if secs is None:
        raise ArgumentTypeError('%r is not a valid time range' % arg)
    return datetime.timedelta(seconds=secs)
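
# Both argument types accept either a plain number of seconds or a
# human-readable duration handled by pytimeparse; for instance (values
# assumed from pytimeparse's documented behaviour, not taken from this
# script):
#
#   DurationType('90')   -> 90
#   DurationType('1h')   -> 3600
#   TimeDeltaType('2d')  -> datetime.timedelta(days=2)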


def weighted_choice(values, weights):
    '''
    random.choice with weights.

    Weights must be integers greater than 0. Their meaning is relative,
    i.e. [1, 2, 3] is equivalent to [2, 4, 6].
    '''
    assert len(values) == len(weights)
    total = 0
    cum_weights = []
    for w in weights:
        total += w
        cum_weights.append(total)
    # pick a point in [0, total) and find the interval it falls into
    x = random.random() * total
    i = bisect(cum_weights, x)
    return values[i]
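
# A quick sanity check (not part of the original script): with values
# ['a', 'b'] and weights [1, 3], 'b' should come up roughly three times
# out of four.
#
#   counts = {'a': 0, 'b': 0}
#   for _ in range(10000):
#       counts[weighted_choice(['a', 'b'], [1, 3])] += 1
#   # counts['b'] is expected to be around 7500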


def delta_humanreadable(tdelta):
    '''Render a timedelta compactly, e.g. "3d2h" or just "5h".'''
    if tdelta is None:
        return ''
    days = tdelta.days
    hours = (tdelta - datetime.timedelta(days=days)).seconds // 3600
    if days:
        return '{}d{}h'.format(days, hours)
    return '{}h'.format(hours)


class Audio(object):
    def __init__(self, url, duration=None, date=None):
        self.url = url
        if duration is None:
            duration = get_duration(url.encode('utf-8'))
        self.duration = duration
        self.date = date
        self.end_date = datetime.datetime(9999, 12, 31,
                                          tzinfo=datetime.timezone.utc)

    def __str__(self):
        return self.url

    def __repr__(self):
        return '<Audio {} ({} {})>'.format(self.url, self.duration,
                                           delta_humanreadable(self.age))

    @property
    def urls(self):
        return [self.url]

    @property
    def age(self):
        if self.date is None:
            return None
        now = datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc)
        return now - self.date

    @property
    def valid(self):
        return self.end_date >= datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc)


class AudioGroup(list):
    def __init__(self, description=None):
        self.description = description or ''
        self.audios = []

    def __len__(self):
        return len(self.audios)

    def append(self, arg):
        self.audios.append(arg)

    def __str__(self):
        return '\n'.join(str(a) for a in self.audios)

    def __repr__(self):
        return '<AudioGroup "{}" ({} {})\n{} >'.\
            format(self.description, self.duration,
                   delta_humanreadable(self.age),
                   '\n'.join(' ' + repr(a) for a in self.audios))

    @property
    def duration(self):
        return sum(a.duration for a in self.audios if a.duration is not None)

    @property
    def urls(self):
        return [a.url for a in self.audios]

    @property
    def date(self):
        for a in self.audios:
            if hasattr(a, 'date'):
                return a.date
        return None

    @property
    def age(self):
        if self.date is None:
            return None
        now = datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc)
        return now - self.date

    @property
    def valid(self):
        return len(self.audios) > 0


def get_tree(feed_url):
    if feed_url.startswith('http:') or feed_url.startswith('https:'):
        tree = html.fromstring(requests.get(feed_url).content)
    else:
        if not os.path.exists(feed_url):
            raise ValueError("file not found: {}".format(feed_url))
        tree = html.parse(feed_url)
    return tree


def get_audio_from_description(text):
    '''Build an Audio from a feed item description.

    The expected format is the audio URL on the first non-empty line,
    optionally followed by "key=value" metadata lines (durata, txdate,
    replica).
    '''
    # non-empty lines
    lines = [line.strip()
             for line in text.split('\n')
             if line.strip()]
    url = lines[0]
    metadata = {}
    for line in lines[1:]:
        if '=' in line:
            key, value = line.split('=', 1)
            metadata[key] = value
    if 'durata' in metadata:
        metadata['durata'] = get_int(metadata['durata'])
    if 'txdate' in metadata:
        try:
            metadata['txdate'] = datetime.datetime.strptime(
                metadata['txdate'], '%Y-%m-%dT%H:%M:%S%z')
        except ValueError:
            logging.warning('could not parse txdate %s', metadata['txdate'])
            del metadata['txdate']
    a = Audio(unquote(url),
              duration=metadata.get('durata', None),
              date=metadata.get('txdate', None))
    if 'txdate' in metadata and 'replica' in metadata:
        if metadata['replica'].endswith('g'):
            a.end_date = metadata['txdate'] + datetime.timedelta(
                days=get_int(metadata['replica']))
    return a
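
# Illustrative example of a description that get_audio_from_description
# understands (URL and values are made up, not taken from a real feed):
#
#   http://example.org/podcast/episode-42.ogg
#   durata=1540
#   txdate=2017-01-01T21:00:00+0100
#   replica=7g
#
# "replica=7g" marks the audio as replayable for 7 days after its txdate.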


# copied from larigira.fsutils
def scan_dir_audio(dirname, extensions=('mp3', 'oga', 'wav', 'ogg')):
    for root, dirnames, filenames in os.walk(dirname):
        for fname in filenames:
            if fname.split('.')[-1].lower() in extensions:
                yield os.path.join(root, fname)


def get_audio_from_dir(dirpath):
    fpaths = scan_dir_audio(dirpath)
    return [Audio('file://' + os.path.realpath(u),
                  date=datetime.datetime.fromtimestamp(os.path.getmtime(u))
                  .replace(tzinfo=datetime.timezone.utc))
            for u in fpaths]


def get_item_date(el):
    # lxml.html lowercases tag names, so <pubDate> is found as "pubdate"
    el_date = el.find('pubdate')
    if el_date is not None:
        return datetime.datetime.strptime(
            el_date.text, '%Y-%m-%dT%H:%M:%S%z')
    return None


def get_urls(tree):
    items = tree.xpath('//item')
    for it in items:
        title = it.find('title').text
        el_body = it.find('description')
        if el_body is not None:
            description = el_body.text
            try:
                audio = get_audio_from_description(description)
            except Exception as exc:
                logging.info('error getting audio for `%s`: %s', title, exc)
                continue
            if audio.date is None:
                audio.date = get_item_date(it)
            yield audio


def get_grouped_urls(tree):
    '''Group feed items by their <guid>, one AudioGroup per article.'''
    groups = OrderedDict()
    items = tree.xpath('//item')
    for item in items:
        guid = item.xpath('guid')[0].text.strip()
        if guid not in groups:
            groups[guid] = AudioGroup(guid)
        audio = get_audio_from_description(item.xpath('description')[0].text)
        audio.date = get_item_date(item)
        if audio.valid:
            groups[guid].append(audio)
    return groups


def get_duration(url):
    '''Return the duration in seconds of a media file or URL, using ffprobe.'''
    try:
        lineout = check_output(['ffprobe', '-v', 'error',
                                '-show_entries', 'format=duration',
                                '-i', url]).split(b'\n')
    except CalledProcessError as exc:
        raise ValueError('error probing `%s`' % url) from exc
    duration = next(l for l in lineout if l.startswith(b'duration='))
    value = duration.split(b'=')[1]
    return int(float(value))
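
# For reference, the ffprobe output parsed above looks roughly like this
# (the exact number is illustrative):
#
#   [FORMAT]
#   duration=1541.198367
#   [/FORMAT]
#
# Only the line starting with "duration=" is used.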


HELP = '''
Collect audio information from multiple sources (XML feeds).
Audio entries are processed in this order:
 1. Collected from feeds (grouped by article if --group is used)
 2. Filtered: everything that does not match the requirements is excluded
 3. Sorted, possibly randomly
 4. Sliced: take HOWMANY elements, skipping START elements
 5. Copied (only if --copy is given)
Usage: '''


def get_parser():
    p = ArgumentParser(description=HELP)
    src = p.add_argument_group('sources', 'How to deal with sources')
    src.add_argument('--source-weights',
                     help='Select only one "source", based on these weights')
    src.add_argument('--group', default=False, action='store_true',
                     help='Group audios that belong to the same article')
    filters = p.add_argument_group('filters', 'Select only items that match '
                                   'these conditions')
    filters.add_argument('--min-len', default=0, type=DurationType,
                         help='Exclude any audio that is shorter '
                         'than MIN_LEN seconds')
    filters.add_argument('--max-len', default=0, type=DurationType,
                         help='Exclude any audio that is longer '
                         'than MAX_LEN seconds')
    filters.add_argument('--sort-by', default='no', type=str,
                         choices=('no', 'random', 'date', 'duration'))
    filters.add_argument('--reverse', default=False,
                         action='store_true', help='Reverse list order')
    filters.add_argument('--min-age', default=datetime.timedelta(),
                         type=TimeDeltaType,
                         help='Exclude audio more recent than MIN_AGE')
    filters.add_argument('--max-age', default=datetime.timedelta(),
                         type=TimeDeltaType,
                         help='Exclude audio older than MAX_AGE')
    p.add_argument('--start', default=0, type=int,
                   help='0-indexed start number. '
                   'By default, play from the most recent')
    p.add_argument('--howmany', default=1, type=int,
                   help='If not specified, only 1 will be played')
    p.add_argument('--slotsize', type=int,
                   help='Seconds between each audio; '
                   'not yet supported with --group')
    general = p.add_argument_group('general', 'General options')
    general.add_argument('--copy', help='Copy files to $TMPDIR', default=False,
                         action='store_true')
    general.add_argument('--debug', help='Debug messages', default=False,
                         action='store_true')
    p.add_argument('urls', metavar='URL', nargs='+')
    return p
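
# Example invocation (illustrative only; the feed URL is made up):
#
#   ./feed --group --max-len 30min --min-age 1d --sort-by date \
#          --howmany 3 --copy https://example.org/podcast.xml
#
# This groups items by article, keeps only audios at most 30 minutes long
# and at least one day old, sorts them by date and copies the 3 selected
# items to $TMPDIR.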


def put(audio, copy=False):
    if not copy:
        for url in audio.urls:
            print(url)
    else:
        for url in audio.urls:
            if url.split(':')[0] in ('http', 'https'):
                destdir = os.environ.get('TMPDIR', '.')
                fname = posixpath.basename(urlparse(url).path)
                # sanitize: keep only alphanumeric characters, dots,
                # underscores and dashes
                fname = "".join(c for c in fname
                                if c.isalnum() or c in list('._-')).rstrip()
                dest = os.path.join(destdir, fname)
                os.makedirs(destdir, exist_ok=True)
                fname, headers = urllib.request.urlretrieve(url, dest)
                print('file://%s' % os.path.realpath(fname))
            else:
                # FIXME: file:// urls are not copied, just printed as-is
                print(url)
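
# For example (illustrative filename, not from the source), with --copy a
# remote "ep 01 (final).mp3" would be saved under $TMPDIR as
# "ep01final.mp3": the sanitization above drops every character that is
# not alphanumeric or one of "._-".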


def main():
    parser = get_parser()
    args = parser.parse_args()
    if not args.debug:
        logging.basicConfig(level=logging.WARNING)
    else:
        logging.basicConfig(level=logging.DEBUG)
    sources = args.urls
    if args.source_weights:
        weights = tuple(map(int, args.source_weights.split(':')))
        if len(weights) != len(sources):
            parser.exit(status=2, message='There must be exactly one '
                        'weight per source\n')
        sources = [weighted_choice(sources, weights)]
    audios = []
    for url in sources:
        if not args.group:
            if os.path.isdir(url):
                audiodir = get_audio_from_dir(url)
                audios += audiodir
            elif url.startswith('http:') or url.startswith('https:') \
                    or os.path.isfile(url):
                audios += get_urls(get_tree(url))
            else:
                logging.info('unsupported url `%s`', url)
            audios = [audio for audio in audios if
                      (audio.valid) and
                      (args.max_len == 0 or
                       audio.duration <= args.max_len) and
                      (args.min_len == 0 or
                       audio.duration >= args.min_len) and
                      (args.min_age.total_seconds() == 0 or
                       audio.age >= args.min_age) and
                      (args.max_age.total_seconds() == 0 or
                       audio.age <= args.max_age)
                      ]
        else:  # group
            if os.path.isdir(url):
                audiodir = get_audio_from_dir(url)
                agroups = []
                for a in audiodir:
                    ag = AudioGroup(os.path.basename(a.url))
                    ag.append(a)
                    agroups.append(ag)
            elif url.startswith('http:') or url.startswith('https:') \
                    or os.path.isfile(url):
                groups = get_grouped_urls(get_tree(url))
                agroups = groups.values()
            else:
                logging.info('unsupported url `%s`', url)
                continue
            audios += [g for g in agroups
                       if
                       (g.valid) and
                       (args.max_len == 0 or
                        g.duration <= args.max_len) and
                       (args.min_len == 0 or
                        g.duration >= args.min_len) and
                       (args.min_age.total_seconds() == 0 or
                        g.age >= args.min_age) and
                       (args.max_age.total_seconds() == 0 or
                        g.age <= args.max_age)
                       ]
    # sort
    if args.sort_by == 'random':
        random.shuffle(audios)
    elif args.sort_by == 'date':
        audios.sort(key=lambda x: x.age)
    elif args.sort_by == 'duration':
        audios.sort(key=lambda x: x.duration)
    if args.reverse:
        audios.reverse()
    # slice
    audios = audios[args.start:]
    audios = audios[:args.howmany]
    if not audios:
        return
    # the for loop below excludes the last audio;
    # this is needed to support the --slotsize option
    for audio in audios[:-1]:
        if args.debug:
            print(repr(audio))
        else:
            put(audio, args.copy)
        if args.slotsize is not None:
            duration = audio.duration
            if duration < args.slotsize:
                print('## music for {} seconds'
                      .format(args.slotsize - duration))
    # finally, the last one
    if args.debug:
        print(repr(audios[-1]))
    else:
        put(audios[-1], args.copy)
    # else:  # grouping; TODO: support slotsize
    #     for item in groups:
    #         if args.debug:
    #             print('#', item, groups[item].duration)
    #         print(groups[item])


if __name__ == '__main__':
    main()