# sampler.py

from enum import Enum
import os

# TF_CPP_MIN_LOG_LEVEL must be set before TensorFlow (or anything that imports it) is loaded,
# otherwise the C++ log suppression has no effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import numpy as np
import librosa
import math
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import random
import shutil
import subprocess
import tempfile
import textwrap
import click
import tensorflow as tf
from PIL import Image, ImageEnhance
from noisemaker.composer import EFFECT_PRESETS, GENERATOR_PRESETS, reload_presets
from noisemaker.constants import ColorSpace, ValueDistribution
from noisemaker.presets import PRESETS
import noisemaker.ai as ai
import noisemaker.dreamer as dreamer
import noisemaker.cli as cli
import noisemaker.generators as generators
import noisemaker.effects as effects
import noisemaker.util as util
import noisemaker.value as value

MAX_SEED_VALUE = 2 ** 32 - 1

reload_presets(PRESETS)


@click.group(context_settings=cli.CLICK_CONTEXT_SETTINGS)
def main():
    pass

def anlyzeSound(audio_input_file):
    # Assumes the "project_name" subfolder has already been created inside the "generated" directory.
    #audio_input_file = '/home/lalo/data/studio_grafica/deforum/stars_clip_r2.wav'
    #audio_input_file = '/home/lalo/data/studio_suono/ardourprojects/space/export/star_r1_sessione_20240310_msub07.wav'
    #audio_input_file = '/home/lalo/data/studio_suono/spx/231104_001_m01.wav'
    #audio_input_file = '/home/lalo/data/studio_grafica/deforum/eucrasy_r1_sample.wav'
    #audio_input_file = '/home/lalo/data/studio_grafica/deforum/eucrasy_r1.wav'
    #audio_input_file = '/home/lalo/data/studio_grafica/deforum/eucrasy_short_r1.wav'
    #audio_input_file = '/home/lalo/data/studio_suono/231014_002_mastered_r2_clip.WAV'
    #audio_input_file = 'C:/Users/LucaConte/Music/lc_music/wilson_r1_mm.wav'
    # librosa.load() returns the waveform and stores the sampling rate in `sr`.
    fps = 24
    flength = 22050 // fps
    # Audio frame size is sr // fps samples (22050 // 24 = 918), so there is one analysis frame per video frame.
    wave, sr = librosa.load(audio_input_file)
    rms = librosa.feature.rms(y=wave, frame_length=flength, hop_length=flength)
    #rms = librosa.feature.rms(y=wave, frame_length=735, hop_length=735)
    cent = librosa.feature.spectral_centroid(y=wave, sr=sr, n_fft=flength, hop_length=flength)
    zcr = librosa.feature.zero_crossing_rate(y=wave, frame_length=flength, hop_length=flength)
    duration = int(math.ceil(librosa.get_duration(y=wave, sr=sr)))
    #frames = duration * fps
    # len(wave) / 22050 = duration in seconds
    # len(rms[0]) / fps = duration in seconds
    # rms[0][i] gives the mean power of the i-th frame
    #generateSoundVariations("bubble-machine", 3793866858, rms, 5000, 7500)
    #generateSoundVariations("sands-of-time", 2226183906, rms, -1, 2500)
    #generateSoundVariations("sands-of-time", 2226183906, rms, 2500, 5000)
    #generateSoundVariations("sands-of-time", 2226183906, rms, 5000, 7500)
    #generateSoundVariations("sands-of-time", 2226183906, rms, 7500, 10000)
    #generateSoundVariations("sands-of-time", 2226183906, rms, 10000, 10333)
    #generateSamples()
    #postprocessBrightness("/tmp/", cent, 8316, 10000)
    return rms, cent, zcr
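
# Illustrative sketch (not called anywhere): with hop_length == 22050 // fps, each column of the
# rms/cent/zcr arrays lines up with one video frame, so frame i of the animation can read its
# loudness as rms[0][i]. The file name below is just a placeholder.
#
#   rms, cent, zcr = anlyzeSound("./dummy.wav")
#   loudness = interp1d([rms[0].min(), rms[0].max()], [0.0, 0.9])
#   speed_for_frame_10 = loudness(rms[0][10])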

def width_option(default=1024, **attrs):
    # Note: int_option is not defined or imported in this module; this helper is unused here.
    attrs.setdefault("help", "Output width, in pixels")
    return int_option("--width", default=default, **attrs)


# The framestart and framestop endpoints are excluded.
# python sampler.py genframes --width 666 --height 666 --preset bubble-machine --framestart 1 --framestop 55
@main.command()
@click.option('--width', required=True, type=int, default=1024)
@click.option('--height', required=True, type=int, default=1024)
@click.option('--seed', required=False, type=int, default=None)
@click.option('--out-dir', required=True, type=str, default=tempfile.gettempdir())
@click.option('--framestart', required=False, type=int, default=1)
@click.option('--framestop', required=False, type=int, default=-1)
@click.option('--audiofile', required=True, type=str, default='./dummy.wav')
@click.option('--preset', type=click.Choice(["random"] + sorted(GENERATOR_PRESETS)))
@click.pass_context
def genframes(ctx, width, height, preset, seed, framestart, framestop, audiofile, out_dir):
    rms, centroids, zcr = anlyzeSound(audiofile)
    if not seed:
        seed = random.randint(1, MAX_SEED_VALUE)
    print(f"(total frames: {len(rms[0])}) (framestart: {framestart}) (framestop: {framestop})")
    time_dividend = 500
    # (Currently unused) linear map of the RMS range onto [0, 0.9].
    s = interp1d([min(rms[0]), max(rms[0])], [0, 0.9])
    trms = rms[0]
    mean = np.mean(trms)
    if framestop < 0:
        framestop = len(rms[0])
    for frame in range(framestart, framestop):
        try:
            # Per-frame speed is the deviation of this frame's RMS power from the mean.
            speed = abs(mean - float(format(trms[frame], '.3f')))
            #speed = float(format(trms[frame], '.3f'))
            #print(f"(speed: {speed}) (trms: {trms[frame]}) )")
            # Time loops through [0, 1) every `time_dividend` frames.
            time = float((frame % time_dividend) / time_dividend)
            filename = out_dir + '/' + preset + "_" + str(seed) + "_" + str(frame).zfill(10) + ".png"
            print(f"(speed: {speed}) (time: {time}) (filename: {filename})")
            generate(width, height, time, speed, seed, filename,
                     False, False, False, False, False, False, "", False, False, preset)
            #generate(1024, 1024, ftime, 1, seed, "/tmp/" + str(frame).zfill(10) + ".png", False, False, False, False, False, False, "", False, False, preset_name)
            #generate(1024, 1024, 0.0, ftime, seed, "/tmp/" + str(frame).zfill(10) + "_" + preset_name + ".png", False, False, False, False, False, False, "", False, False, preset_name)
        except Exception as e:
            print(f"Exception {e} on Preset: {preset}")
            continue

@main.command()
@click.pass_context
def testpillow(ctx):
    image = Image.open('/tmp/the-inward-spiral_3917354306_0_0.23284526795328386.png')
    brightness_factor = 3.5  # Multiply brightness by 3.5
    enhancer = ImageEnhance.Brightness(image)
    brightened_image = enhancer.enhance(brightness_factor)
    contrast_factor = 4.2  # Multiply contrast by 4.2
    enhancer = ImageEnhance.Contrast(image)
    contrasted_image = enhancer.enhance(contrast_factor)
    brightened_image.save("/tmp/alt/brightened_image.png")
    contrasted_image.save("/tmp/alt/contrasted_image.png")

def postprocessBrightness(srcdir, centroids, framemin=0, framemax=9999999999):
    # Map the spectral centroid range onto a brightness factor between 1 and 4.5.
    c = interp1d([centroids.min(), centroids.max()], [1, 4.5])
    items = os.listdir(srcdir)
    sorted_items = sorted(items)
    for f in sorted_items:
        if f.endswith(".png"):
            # The frame index is the last underscore-separated field of the file name.
            frame = int(f.split("_")[-1].split(".")[0])
            if frame >= framemin and frame < framemax:
                image = Image.open(srcdir + f)
                enhancer = ImageEnhance.Brightness(image)
                brightened_image = enhancer.enhance(c(centroids[0][frame]))
                print(f'File: {f}')
                brightened_image.save(srcdir + "b_" + f)
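
# Illustrative usage (paths are placeholders; assumes the frames in /tmp/ were produced by
# genframes above, so their names end with the zero-padded frame index):
#
#   rms, cent, zcr = anlyzeSound("./dummy.wav")
#   postprocessBrightness("/tmp/", cent)  # writes brightness-adjusted copies prefixed with "b_"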

def testCombinedVariations(preset_name):
    seed = random.randint(1, MAX_SEED_VALUE)
    times = 0
    for speed in range(0, 999, 1):
        try:
            # Hold each random time value for a random number of steps.
            if times == 0:
                times = random.randint(1, 30)
                time = round(random.uniform(0, 1), 1)
            fspeed = float(speed / 100)
            filename = "/tmp/" + preset_name + "_" + str(seed) + "_" + str(speed).zfill(10) + ".png"
            print(f"(speed: {speed}) (time: {time}) (times: {times}) (filename: {filename})")
            times -= 1
            #generate(600, 600, time, fspeed, seed, filename, False, False, False, False, False, False, "", False, False, preset_name)
        except Exception as e:
            print(f"Exception {e} on Preset: {preset_name}")
            continue


def generateSpeedVariations(preset_name):
    seed = random.randint(1, MAX_SEED_VALUE)
    for speed in range(0, 999, 1):
        try:
            generate(600, 600, 0.1, speed, seed, "/tmp/" + preset_name + "_" + str(seed) + "_" + str(speed).zfill(10) + ".png",
                     False, False, False, False, False, False, "", False, False, preset_name)
        except Exception as e:
            print(f"Exception {e} on Preset: {preset_name}")
            continue

@main.command()
@click.option('--iterations', required=False, type=int, default=100)
@click.option('--preset', type=click.Choice(["random"] + sorted(GENERATOR_PRESETS)))
@click.pass_context
def gentimevariations(ctx, preset, iterations):
    seed = random.randint(1, MAX_SEED_VALUE)
    speed = random.random()
    for time in range(0, iterations, 1):
        try:
            ftime = float(time / iterations)
            print(str(ftime))
            filename = tempfile.gettempdir() + "/" + preset + "_" + str(seed) + "_" + str(time) + "_" + str(speed) + ".png"
            generate(600, 600, ftime, speed, seed, filename, False, False, False, False, False, False, "", False, False, preset)
        except Exception as e:
            print(f"Exception {e} on Preset: {preset}")
            continue


@main.command()
@click.pass_context
def gensamples(ctx):
    for preset_name, preset_data in PRESETS().items():
        try:
            seed = random.randint(1, MAX_SEED_VALUE)
            time = random.random()
            speed = random.random()
            filename = tempfile.gettempdir() + "/" + preset_name + "_" + str(seed) + "_" + str(time) + "_" + str(speed) + ".png"
            print(f"Going to generate: {filename}")
            generate(1024, 1024, time, speed, seed, filename,
                     False, False, False, False, False, False, "", False, False, preset_name)
        except Exception as e:
            print(f"Exception {e} on Preset: {preset_name}")
            continue


@main.command()
@click.option('--iterations', required=False, type=int, default=10)
@click.option('--preset', type=click.Choice(["random"] + sorted(GENERATOR_PRESETS)))
@click.pass_context
def genseedsvariations(ctx, preset, iterations):
    for i in range(iterations):
        try:
            seed = random.randint(1, MAX_SEED_VALUE)
            time = random.random()
            speed = random.random()
            filename = tempfile.gettempdir() + "/" + preset + "_" + str(seed) + "_" + str(time) + "_" + str(speed) + ".png"
            print(f"Going to generate: {filename}")
            generate(1024, 1024, time, speed, seed, filename,
                     False, False, False, False, False, False, "", False, False, preset)
        except Exception as e:
            print(f"Exception {e} on Preset: {preset}")
            continue

def generate(width, height, time, speed, seed, filename, with_alpha, with_supersample, with_fxaa, with_ai, with_upscale,
             with_alt_text, stability_model, debug_print, debug_out, preset_name):
    if not seed:
        seed = random.randint(1, MAX_SEED_VALUE)
    value.set_seed(seed)
    reload_presets(PRESETS)
    if preset_name == "random":
        preset_name = list(GENERATOR_PRESETS)[random.randint(0, len(GENERATOR_PRESETS) - 1)]
    preset = GENERATOR_PRESETS[preset_name]
    if debug_print or debug_out:
        debug_text = _debug_print(seed, preset, with_alpha, with_supersample, with_fxaa, with_ai, with_upscale, stability_model)
        if debug_print:
            for line in debug_text:
                print(line)
        if debug_out is not None:
            with open(debug_out, 'w') as fh:
                for line in debug_text:
                    fh.write(line + "\n")
    try:
        preset.render(seed, shape=[height, width, None], time=time, speed=speed, filename=filename,
                      with_alpha=with_alpha, with_supersample=with_supersample, with_fxaa=with_fxaa,
                      with_ai=with_ai, with_upscale=with_upscale, stability_model=stability_model)
    except Exception as e:
        util.logger.error(f"preset.render() failed: {e}\nSeed: {seed}\nArgs: {preset.__dict__}")
        raise
    if preset.ai_success:
        print(f"{preset_name} vs. {preset.ai_settings['model']} (seed: {seed})")
    else:
        print(f"{preset_name} (seed: {seed})")
    if with_alt_text:
        print(ai.describe(preset.name.replace('-', ' '), preset.ai_settings.get("prompt"), filename))
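
# Minimal direct-call sketch of generate(); positional argument order as defined above.
# "bubble-machine" is only an example name and must exist in GENERATOR_PRESETS.
#
#   generate(512, 512, 0.0, 0.5, None, "/tmp/example.png",
#            False, False, False, False, False,   # with_alpha .. with_upscale
#            False, "", False, None,              # with_alt_text, stability_model, debug_print, debug_out
#            "bubble-machine")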

def _debug_print(seed, preset, with_alpha, with_supersample, with_fxaa, with_ai, with_upscale, stability_model):
    first_column = ["Layers:"]
    if preset.flattened_layers:
        first_column.append(" - Lineage (by newest):")
    if not preset.flattened_layers:
        first_column.append(" - None")
    for parent in reversed(preset.flattened_layers):
        first_column.append(f" - {parent}")
    first_column.append("")
    first_column.append(" - Effects (by newest):")
    if not preset.final_effects and not with_ai and not preset.post_effects:
        first_column.append(" - None")
        first_column.append("")
    if preset.final_effects:
        first_column.append(" - Final Pass:")
        for effect in reversed(preset.final_effects):
            if callable(effect):
                first_column.append(f" - {effect.func.__name__.replace('_', ' ')}")
            else:
                first_column.append(f" - {effect.name.replace('_', ' ').replace('-', ' ')}")
        first_column.append("")
    if with_ai:
        first_column.append(" - AI Settings:")
        for (k, v) in sorted(preset.ai_settings.items()):
            if stability_model and k == 'model':
                v = stability_model
            for i, line in enumerate(textwrap.wrap(f"{k.replace('_', ' ')}: {v}", 42)):
                if i == 0:
                    first_column.append(f" - {line}")
                else:
                    first_column.append(f" {line}")
        first_column.append("")
    if preset.post_effects or with_ai:
        first_column.append(" - Post Pass:")
        if with_ai:
            first_column.append(" - stable diffusion")
        for effect in reversed(preset.post_effects):
            if callable(effect):
                first_column.append(f" - {effect.func.__name__.replace('_', ' ')}")
            else:
                first_column.append(f" - {effect.name.replace('_', ' ').replace('-', ' ')}")
        first_column.append("")
    if preset.octave_effects:
        first_column.append(" - Per-Octave Pass:")
        for effect in reversed(preset.octave_effects):
            if callable(effect):
                first_column.append(f" - {effect.func.__name__.replace('_', ' ')}")
            else:
                first_column.append(f" - {effect.name.replace('_', ' ').replace('-', ' ')}")
        first_column.append("")
    first_column.append("Canvas:")
    first_column.append(f" - seed: {seed}")
    first_column.append(f" - with alpha: {with_alpha}")
    first_column.append(f" - with supersample: {with_supersample}")
    first_column.append(f" - with fxaa: {with_fxaa}")
    first_column.append(f" - with upscale: {with_upscale}")
    first_column.append("")
    second_column = ["Settings:"]
    for (k, v) in sorted(preset.settings.items()):
        if isinstance(v, Enum):
            second_column.append(f" - {k.replace('_', ' ')}: {v.name.replace('_', ' ')}")
        elif isinstance(v, float):
            second_column.append(f" - {k.replace('_', ' ')}: {round(v, 3)}")
        else:
            second_column.append(f" - {k.replace('_', ' ')}: {v}")
    second_column.append("")
    out = []
    for i in range(max(len(first_column), len(second_column))):
        if i < len(first_column):
            first = first_column[i]
        else:
            first = ""
        if i < len(second_column):
            second = second_column[i]
        else:
            second = ""
        out.append(f"{first:50} {second}")
    return out

def apply(ctx, seed, filename, no_resize, with_fxaa, time, speed, preset_name, input_filename):
    if not seed:
        seed = random.randint(1, MAX_SEED_VALUE)
    value.set_seed(seed)
    reload_presets(PRESETS)
    input_shape = util.shape_from_file(input_filename)
    input_shape[2] = min(input_shape[2], 3)
    tensor = tf.image.convert_image_dtype(util.load(input_filename, channels=input_shape[2]), dtype=tf.float32)
    if preset_name == "random":
        preset_name = list(EFFECT_PRESETS)[random.randint(0, len(EFFECT_PRESETS) - 1)]
    print(f"{preset_name} (seed: {seed})")
    preset = EFFECT_PRESETS[preset_name]
    if no_resize:
        shape = input_shape
    else:
        shape = [1024, 1024, input_shape[2]]
        tensor = effects.square_crop_and_resize(tensor, input_shape, shape[0])
    try:
        preset.render(seed=seed, tensor=tensor, shape=shape, with_fxaa=with_fxaa, time=time, speed=speed, filename=filename)
    except Exception as e:
        util.logger.error(f"preset.render() failed: {e}\nSeed: {seed}\nArgs: {preset.__dict__}")
        raise

def animate(ctx, width, height, seed, effect_preset, filename, save_frames, frame_count, watermark, preview_filename, with_alt_text, with_supersample, with_fxaa, preset_name):
    if seed is None:
        seed = random.randint(1, MAX_SEED_VALUE)
    value.set_seed(seed)
    reload_presets(PRESETS)
    if preset_name == 'random':
        preset_name = list(GENERATOR_PRESETS)[random.randint(0, len(GENERATOR_PRESETS) - 1)]
    if effect_preset == 'random':
        effect_preset = list(EFFECT_PRESETS)[random.randint(0, len(EFFECT_PRESETS) - 1)]
    if effect_preset:
        print(f"{preset_name} vs. {effect_preset} (seed: {seed})")
    else:
        print(f"{preset_name} (seed: {seed})")
    preset = GENERATOR_PRESETS[preset_name]
    caption = None
    with tempfile.TemporaryDirectory() as tmp:
        for i in range(frame_count):
            frame_filename = f'{tmp}/{i:04d}.png'
            common_params = ['--seed', str(seed),
                             '--time', f'{i/frame_count:0.4f}',
                             '--filename', frame_filename]
            extra_params = []
            if with_alt_text and i == 0:
                extra_params = ['--with-alt-text']
            if with_supersample:
                extra_params += ['--with-supersample']
            if with_fxaa:
                extra_params += ['--with-fxaa']
            output = subprocess.check_output(['noisemaker', 'generate', preset_name,
                                              '--speed', str(_use_reasonable_speed(preset, frame_count)),
                                              '--height', str(height),
                                              '--width', str(width)] + common_params + extra_params,
                                             universal_newlines=True).strip().split("\n")
            if with_alt_text and i == 0:
                if len(output) == 6:  # Useless extra crap that Tensorflow on Apple Silicon spews to stdout
                    print(output[2])
                else:
                    print(output[1])
            if effect_preset:
                extra_params = []
                if with_fxaa:
                    extra_params += ['--with-fxaa']
                util.check_call(['noisemaker', 'apply', effect_preset, frame_filename,
                                 '--no-resize',
                                 '--speed', str(_use_reasonable_speed(EFFECT_PRESETS[effect_preset], frame_count))]
                                + common_params + extra_params)
            if save_frames:
                shutil.copy(frame_filename, save_frames)
            if watermark:
                util.watermark(watermark, frame_filename)
            if preview_filename and i == 0:
                shutil.copy(frame_filename, preview_filename)
        if filename.endswith(".mp4"):
            util.check_call(["ffmpeg",
                             "-framerate", "30",
                             "-i", f"{tmp}/%04d.png",
                             "-s", "1024x1024",
                             "-c:v", "libx264",
                             "-preset", "veryslow",
                             "-crf", "15",
                             "-pix_fmt", "yuv420p",
                             "-b:v", "8000k",
                             "-bufsize", "16000k",
                             filename])
        else:
            util.magick(f'{tmp}/*png', filename)

@main.command(help="Blend a directory of .png or .jpg images")
@cli.input_dir_option(required=True)
@cli.filename_option(default="collage.png")
@click.option("--control-filename", help="Control image filename (optional)")
@cli.time_option()
@click.option('--speed', help="Animation speed", type=float, default=0.25)
@cli.seed_option()
@click.pass_context
def mashup(ctx, input_dir, filename, control_filename, time, speed, seed):
    filenames = []
    for root, _, files in os.walk(input_dir):
        for f in files:
            if f.endswith(('.png', '.jpg')):
                filenames.append(os.path.join(root, f))
    collage_count = min(random.randint(4, 6), len(filenames))
    collage_images = []
    for i in range(collage_count + 1):
        index = random.randint(0, len(filenames) - 1)
        input_filename = os.path.join(input_dir, filenames[index])
        collage_input = tf.image.convert_image_dtype(util.load(input_filename, channels=3), dtype=tf.float32)
        collage_images.append(collage_input)
    if control_filename:
        control_shape = util.shape_from_file(control_filename)
        control = tf.image.convert_image_dtype(util.load(control_filename, channels=control_shape[2]), dtype=tf.float32)
    else:
        control = collage_images.pop()
    shape = tf.shape(control)  # All images need to be the same size!
    control = value.value_map(control, shape, keepdims=True)
    value.set_seed(seed)
    base = generators.basic(freq=random.randint(2, 5), shape=shape, lattice_drift=random.randint(0, 1), hue_range=random.random(),
                            time=time, speed=speed)
    value_shape = value.value_shape(shape)
    control = value.convolve(kernel=effects.ValueMask.conv2d_blur, tensor=control, shape=value_shape)
    tensor = effects.blend_layers(control, shape, random.random() * .5, *collage_images)
    tensor = value.blend(tensor, base, .125 + random.random() * .125)
    tensor = effects.bloom(tensor, shape, alpha=.25 + random.random() * .125)
    tensor = effects.shadow(tensor, shape, alpha=.25 + random.random() * .125, reference=control)
    tensor = tf.image.adjust_brightness(tensor, .1)
    tensor = tf.image.adjust_contrast(tensor, 1.5)
    util.save(tensor, filename)
    print('mashup')

def _use_reasonable_speed(preset, frame_count):
    """Return a reasonable speed parameter for the given animation length."""
    return preset.settings.get("speed", 0.25) * (frame_count / 50.0)
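
# Worked example: with the default speed of 0.25 and a 150-frame animation this returns
# 0.25 * (150 / 50.0) == 0.75. Scaling speed with frame count presumably keeps the apparent
# per-frame motion comparable across animation lengths, since time still spans [0, 1).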

def dream(width, height, filename):
    name, prompt, description = dreamer.dream(width, height, filename=filename)
    print(name)
    print(prompt)
    print(description)


if __name__ == "__main__":
    main()
    #testpillow()
    #postprocessBrightness('/tmp/')