+31
lib/diffusion/sdxl_vae.py
···
3
3
import torch.nn as nn
4
4
from torch.nn import functional as func
5
5
from safetensors import safe_open as st_open
6
+
from PIL import Image
6
7
7
8
class Attention(nn.Module):
8
9
def __init__(self):
···
153
154
for key in sd.keys():
154
155
sd[key].copy_(file.get_tensor(key))
155
156
157
+
approximation_matrix = [
158
+
[0.85, 0.85, 0.6], # seems to be mainly value
159
+
[-0.35, 0.2, 0.5], # mainly blue? maybe a little green, def not red
160
+
[0.15, 0.15, 0], # yellow. but mainly encoding texture not color, i think
161
+
[0.15, -0.35, -0.35] # inverted value? but also red
162
+
]
163
+
164
+
def save_approx_decode(latents, path):
165
+
lmin = latents.min()
166
+
l = latents - lmin
167
+
lmax = l.max()
168
+
l = l / lmax
169
+
l = l.float().mul_(0.5).add_(0.5)
170
+
ims = []
171
+
for lat in l:
172
+
apx_mat = torch.tensor(approximation_matrix).to("cuda")
173
+
approx_decode = torch.einsum("...lhw,lr -> ...rhw", lat, apx_mat).mul_(255).round()
174
+
#lat -= lat.min()
175
+
#lat /= lat.max()
176
+
im_data = approx_decode.permute(1,2,0).detach().cpu().numpy().astype("uint8")
177
+
#im_data = im_data.round().astype("uint8")
178
+
im = Image.fromarray(im_data).resize(size=(im_data.shape[1]*8,im_data.shape[0]*8), resample=Image.NEAREST)
179
+
ims += [im]
180
+
181
+
#clear_output()
182
+
for im in ims:
183
+
#im.save(f"out/tmp_approx_decode/{index:06d}.bmp")
184
+
im.save(path)
185
+
#display(im)
186
+
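# illustration only (not part of the patch): how the 4x3 approximation_matrix maps the four
# latent channels to rgb. the latent shape and names below are assumptions for the example.
latent = torch.randn(4, 96, 96, device="cuda")                # one latent, (channels, h, w)
apx_mat = torch.tensor(approximation_matrix, device="cuda")   # (4 latent channels, 3 rgb channels)
rgb = torch.einsum("...lhw,lr -> ...rhw", latent, apx_mat)    # (3, 96, 96)
# each rgb pixel is a weighted sum of the four latent channel values at that location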
+55
lib/internal/witchery.py
···
1
+
import dis
2
+
3
+
class Errs(type):
4
+
def __iter__(_class):
5
+
return iter((k,v) for (k,v) in vars(_class).items() if not k.startswith("_"))
6
+
7
+
def __repr__(_class):
8
+
return str(list(k for (k,v) in _class))
9
+
10
+
def errs(errs):
11
+
def _errs(fn):
12
+
fn.errs = errs
13
+
return fn
14
+
return _errs
15
+
16
+
# stuff above this line i probs move to another file
17
+
18
+
class _Errs(metaclass=Errs):
19
+
FOUND_RETURN = "encountered return instruction"
20
+
IN_DEFINITION = "error in fn definition"
21
+
IN_EXECUTION = "error in fn execution"
22
+
NON_DICT_RETURN = "fn dumped a non-dict"
23
+
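# rejects functions that contain an explicit return, then re-execs the function's source with
# "return locals()" appended, so that calling the rebuilt function returns its local scope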
24
+
def _modify_to_dump_locals(fn, fn_source, deftime_globals, log):
25
+
instructions = dis.get_instructions(fn)
26
+
for instruction in instructions:
27
+
if instruction.opcode == dis.opmap["RETURN_VALUE"]:
28
+
return (False, _Errs.FOUND_RETURN)
29
+
30
+
fn_source_modified = f"{fn_source}\n return locals()"
31
+
sandbox = {}
32
+
try:
33
+
exec(fn_source_modified, globals=deftime_globals, locals=sandbox)
34
+
except KeyboardInterrupt:
35
+
raise
36
+
except:
37
+
log.indented().trace(source=fn)
38
+
return (False, _Errs.IN_DEFINITION)
39
+
return (True, sandbox[fn.__name__])
40
+
41
+
@errs(_Errs)
42
+
def try_dump_locals(fn, fn_source, args, kwargs, deftime_globals, log):
43
+
(success, fn_or_err) = _modify_to_dump_locals(fn, fn_source, deftime_globals, log)
44
+
if not success:
45
+
return (False, fn_or_err)
46
+
try:
47
+
res = fn_or_err(*args, **kwargs)
48
+
except KeyboardInterrupt:
49
+
raise
50
+
except:
51
+
log.indented().trace(source=fn)
52
+
return (False, _Errs.IN_EXECUTION)
53
+
if not isinstance(res, dict):
54
+
return (False, _Errs.NON_DICT_RETURN)
55
+
return (True, res)
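# illustration only (not part of the patch): @errs attaches _Errs to the function, and the Errs
# metaclass makes that class iterable, so callers can compare against named errors
# (err == try_dump_locals.errs.NON_DICT_RETURN) or enumerate every (name, message) pair:
from lib.internal.witchery import try_dump_locals
for (name, message) in try_dump_locals.errs:
    print(f"{name}: {message}")   # e.g. FOUND_RETURN: encountered return instruction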
+1
lib/iter.py
+3
lib/log.py
···
2
2
import sys
3
3
import inspect
4
4
import traceback
5
+
import time
5
6
6
7
from dataclasses import dataclass
7
8
from typing import Optional
···
14
15
tag_color = "\033[31m"
15
16
elif mode == "success":
16
17
tag_color = "\033[92m"
18
+
elif mode == "warning":
19
+
tag_color = "\033[33m"
17
20
else:
18
21
tag_color = "\033[96m"
19
22
print(f"{tag_color}{' '*indent}[{tag}]{reset_color} {content}{final_color}")
+53
-59
main.py
···
15
15
from lib import util
16
16
from lib.log import Logger, inner_log
17
17
from lib.internal.parse import lsnap
18
+
from lib.internal.witchery import try_dump_locals
18
19
19
20
parser = ArgParser("snakepyt")
20
21
#parser.add_argument("sketch", help="the sketch to run", type=str)
···
28
29
schedule.append((fn, args))
29
30
return (schedule, _schedule)
30
31
31
-
def modify_to_dump_locals(fn, deftime_globals, log, sources):
32
-
instructions = dis.get_instructions(fn)
33
-
for instruction in instructions:
34
-
if instruction.opcode == dis.opmap["RETURN_VALUE"]:
35
-
return (False, "encountered return instruction")
36
-
fn_source = sources[fn.__name__]
37
-
fn_source_modified = f"{fn_source}\n return locals()"
38
-
sandbox = {}
39
-
try:
40
-
exec(fn_source_modified, globals=deftime_globals, locals=sandbox)
41
-
except KeyboardInterrupt:
42
-
raise
43
-
except:
44
-
log.indented().trace(source=fn)
45
-
return (False, "error in definition")
46
-
return (True, sandbox[fn.__name__])
47
-
48
-
49
32
def run(fn, arg, partial_id, outer_scope, log, sources, finalizer=None):
50
33
scope = dict(outer_scope)
51
34
schedule, schedule_fn = establish_scheduler()
52
35
scope["schedule"] = schedule_fn
53
36
scope["print"] = inner_log(source=fn, indent=4)
54
-
#fn_name = fn.__name__
55
37
run_id = partial_id + (arg,)
56
-
#instructions = dis.get_instructions(fn)
57
-
#for instruction in instructions:
58
-
# if instruction.opcode == dis.opmap["RETURN_VALUE"]:
59
-
# log(f"{fn_name} cannot be scheduled because it has a return instruction", mode="error")
60
-
# return False
61
-
#fn_source = sources[fn.__name__]
62
-
#fn_source_modified = f"{fn_source}\n return locals()"
63
-
#sandbox = {}
64
38
65
-
(succ, fn_or_err) = modify_to_dump_locals(fn, scope, log, sources)
66
-
if not succ:
67
-
log.indented().log(fn_or_err, mode="error")
68
-
return False
69
-
fn_modified = fn_or_err
70
-
try:
71
-
#exec(fn_source_modified, globals=scope, locals=sandbox)
72
-
#fn_modified = sandbox[fn_name]
73
-
if arg is not None:
74
-
fn_locals = fn_modified(arg)
39
+
args = [] if arg is None else [arg]
40
+
(success, locals_or_err) = try_dump_locals(fn, sources[fn.__name__], args, {}, scope, log)
41
+
if success:
42
+
scope.update(locals_or_err)
43
+
else:
44
+
err = locals_or_err
45
+
errs = try_dump_locals.errs
46
+
if err == errs.NON_DICT_RETURN:
47
+
log.indented().log("could not extract locals. check for an early return", mode="warning")
75
48
else:
76
-
fn_locals = fn_modified()
77
-
scope.update(fn_locals)
78
-
except KeyboardInterrupt:
79
-
raise
80
-
except:
81
-
log.indented().trace(source=fn)
82
-
return False
49
+
log.indented().log(locals_or_err, mode="error")
50
+
return False
51
+
52
+
83
53
for (fn, args) in schedule:
84
54
if args is not None:
85
55
for arg in args:
···
107
77
if sketch_name in persistent_hashes:
108
78
if bytecode_hash == persistent_hashes[sketch_name]:
109
79
return True
110
-
(success, fn_or_err) = modify_to_dump_locals(persistent_fn, module_globals, log, sources)
80
+
(success, locals_or_err) = try_dump_locals(persistent_fn, sources[persistent_fn.__name__], [], {}, module_globals, log)
111
81
if success:
112
-
try:
113
-
fn_locals = fn_or_err()
114
-
except KeyboardInterrupt:
115
-
raise
116
-
except:
117
-
log.indented().trace(source=persistent_fn)
118
-
return False
119
82
persistent_hashes[sketch_name] = bytecode_hash
120
-
persistent_state.update(fn_locals)
83
+
persistent_state.update(locals_or_err)
121
84
else:
122
-
log(f"failed to run persistent function: {fn_or_err}", mode="error")
85
+
log(f"failed to run persistent function: {locals_or_err}", mode="error")
86
+
123
87
return success
124
88
125
89
···
144
108
if "prefix" in persistent_state:
145
109
message = " ".join([persistent_state["prefix"], message])
146
110
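# dot-prefixed input inspects the persistent state: "." alone prints all of it, and a path
# like ".foo.bar" walks nested scopes, printing each step ("foo" and "bar" are illustrative)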
111
+
message = message.lstrip()
112
+
113
+
if message.startswith("."):
114
+
if message.rstrip() == ".":
115
+
pyt_print(persistent_state)
116
+
else:
117
+
segments = [segment.strip() for segment in message.split(".")][1:]
118
+
selection = ("base scope", persistent_state)
119
+
for segment in segments:
120
+
if segment == "":
121
+
pyt_print("repeated dots (..) are redundant", mode="warning")
122
+
continue
123
+
try:
124
+
selection = (segment, selection[1][segment])
125
+
pyt_print(f"{selection[0]}: {selection[1]}")
126
+
except KeyError:
127
+
pyt_print(f"no \"{segment}\" in {selection[0]}", mode="error")
128
+
continue
129
+
except TypeError:
130
+
pyt_print(f"{selection[0]} is not a scope", mode="error")
131
+
pyt_print.indented()(f"{selection[0]}: {selection[1]}", mode="info")
132
+
133
+
continue
134
+
135
+
147
136
(command, remainder) = lsnap(message)
148
137
149
-
if command == "state":
150
-
pyt_print(persistent_state)
151
138
if command == "flush":
152
139
persistent_state = {}
153
140
persistent_hashes = {}
154
141
pyt_print("state flushed")
142
+
continue
155
143
if command in ["exit", "quit", ":q", ",q"]:
156
144
pyt_print.blank().log("goodbye <3").blank()
157
145
repl_continue = False
158
146
continue
147
+
if command in ["hello", "hi"]:
148
+
pyt_print("hiii :3")
149
+
continue
159
150
if command == "crash":
160
151
raise Exception("crashing on purpose :3")
152
+
continue
161
153
if command == "run":
162
154
sketch_name, remainder = lsnap(remainder)
163
155
···
202
194
with open(f"out/{run_dir}/.snakepyt", "w") as metadata:
203
195
metadata.write(f"snakepyt version {snakepyt_version[0]}.{snakepyt_version[1]}\n")
204
196
205
-
if hasattr(sketch, "init"):
197
+
if hasattr(sketch, "main"):
206
198
if hasattr(sketch, "final"):
207
199
finalizer = sketch.final.__code__
208
200
else:
209
201
finalizer = None
210
202
try:
211
-
run(sketch.init, None, (), sketch.__dict__, log, sources, finalizer)
203
+
run(sketch.main, None, (), sketch.__dict__, log, sources, finalizer)
212
204
except KeyboardInterrupt:
213
205
pyt_print.blank().log("aborted", mode="info").blank()
214
206
continue
215
207
else:
216
208
log("sketch has no init function", mode="error", indent=4)
217
-
#run(None, settings_functions, (), sketch.__dict__)
218
209
219
210
log(f"finished all runs in {perf_counter() - t0:.3f}s")
211
+
continue
212
+
213
+
pyt_print(f"command: {command}", mode="info")
220
214
-214
original.py
···
1
-
2
-
import inspect
3
-
import dis
4
-
import traceback
5
-
import hashlib
6
-
import sys
7
-
import os
8
-
import time
9
-
import shutil
10
-
from pathlib import Path
11
-
from argparse import ArgumentParser as ArgParser
12
-
from importlib import import_module, reload
13
-
from time import perf_counter
14
-
15
-
from lib import util
16
-
from lib.log import Logger, inner_log
17
-
from lib.internal.parse import lsnap
18
-
19
-
parser = ArgParser("snakepyt")
20
-
#parser.add_argument("sketch", help="the sketch to run", type=str)
21
-
args = parser.parse_args()
22
-
23
-
snakepyt_version = (0, 0)
24
-
25
-
def establish_scheduler():
26
-
schedule = []
27
-
def _schedule(fn, args):
28
-
schedule.append((fn, args))
29
-
return (schedule, _schedule)
30
-
31
-
def modify_to_dump_locals(fn, deftime_globals, log, sources):
32
-
instructions = dis.get_instructions(fn)
33
-
for instruction in instructions:
34
-
if instruction.opcode == dis.opmap["RETURN_VALUE"]:
35
-
return (False, "encountered return instruction")
36
-
fn_source = sources[fn.__name__]
37
-
fn_source_modified = f"{fn_source}\n return locals()"
38
-
sandbox = {}
39
-
try:
40
-
exec(fn_source_modified, globals=deftime_globals, locals=sandbox)
41
-
except KeyboardInterrupt:
42
-
raise
43
-
except:
44
-
log.indented().trace(source=fn)
45
-
return (False, "error in definition")
46
-
return (True, sandbox[fn.__name__])
47
-
48
-
49
-
def run(fn, arg, partial_id, outer_scope, log, sources, finalizer=None):
50
-
scope = dict(outer_scope)
51
-
schedule, schedule_fn = establish_scheduler()
52
-
scope["schedule"] = schedule_fn
53
-
scope["print"] = inner_log(source=fn, indent=4)
54
-
fn_name = fn.__name__
55
-
run_id = partial_id + (arg,)
56
-
instructions = dis.get_instructions(fn)
57
-
for instruction in instructions:
58
-
if instruction.opcode == dis.opmap["RETURN_VALUE"]:
59
-
log(f"{fn_name} cannot be scheduled because it has a return instruction", mode="error")
60
-
return False
61
-
fn_source = sources[fn.__name__]
62
-
fn_source_modified = f"{fn_source}\n return locals()"
63
-
sandbox = {}
64
-
try:
65
-
exec(fn_source_modified, globals=scope, locals=sandbox)
66
-
fn_modified = sandbox[fn_name]
67
-
if arg is not None:
68
-
fn_locals = fn_modified(arg)
69
-
else:
70
-
fn_locals = fn_modified()
71
-
scope.update(fn_locals)
72
-
except KeyboardInterrupt:
73
-
raise
74
-
except:
75
-
log.indented().trace(source=fn)
76
-
return False
77
-
for (fn, args) in schedule:
78
-
if args is not None:
79
-
for arg in args:
80
-
run(fn, arg, run_id, scope, log.indented(), sources)
81
-
else:
82
-
t0 = perf_counter()
83
-
log(f"begin {fn.__name__} {run_id}")
84
-
success = run(fn, None, run_id, scope, log.indented(), sources)
85
-
log(f"done in {perf_counter() - t0:.3f}s", mode="success" if success else "error")
86
-
log.blank()
87
-
if finalizer is not None:
88
-
try:
89
-
exec(finalizer, globals=scope)
90
-
except KeyboardInterrupt:
91
-
raise
92
-
except:
93
-
log.indented().trace(source=fn)
94
-
return False
95
-
return True
96
-
97
-
persistent_state = {}
98
-
persistent_hashes = {}
99
-
def handle_persistent(persistent_fn, module_globals, log, sources):
100
-
bytecode_hash = hashlib.sha256(persistent_fn.__code__.co_code).hexdigest()
101
-
if sketch_name in persistent_hashes:
102
-
if bytecode_hash == persistent_hashes[sketch_name]:
103
-
return True
104
-
(success, fn_or_err) = modify_to_dump_locals(persistent_fn, module_globals, log, sources)
105
-
if success:
106
-
try:
107
-
fn_locals = fn_or_err()
108
-
except KeyboardInterrupt:
109
-
raise
110
-
except:
111
-
log.indented().trace(source=persistent_fn)
112
-
return False
113
-
persistent_hashes[sketch_name] = bytecode_hash
114
-
persistent_state.update(fn_locals)
115
-
else:
116
-
log(f"failed to run persistent function: {fn_or_err}", mode="error")
117
-
return success
118
-
119
-
120
-
try:
121
-
username = os.getlogin()
122
-
except:
123
-
username = None
124
-
125
-
pyt_print = Logger().mode("success").tag("snakepyt")
126
-
pyt_print(f"hello {username}! <3" if username else "hello! <3")
127
-
pyt_print.blank()
128
-
129
-
repl_continue = True
130
-
while repl_continue:
131
-
try:
132
-
message = pyt_print.input(username)
133
-
except (KeyboardInterrupt, EOFError):
134
-
pyt_print.blank().log("goodbye <3").blank()
135
-
repl_continue = False
136
-
continue
137
-
138
-
if "prefix" in persistent_state:
139
-
message = " ".join([persistent_state["prefix"], message])
140
-
141
-
(command, remainder) = lsnap(message)
142
-
143
-
if command == "state":
144
-
pyt_print(persistent_state)
145
-
if command == "flush":
146
-
persistent_state = {}
147
-
persistent_hashes = {}
148
-
pyt_print("state flushed")
149
-
if command in ["exit", "quit", ":q", ",q"]:
150
-
pyt_print.blank().log("goodbye <3").blank()
151
-
repl_continue = False
152
-
continue
153
-
if command == "crash":
154
-
raise Exception("crashing on purpose :3")
155
-
if command == "run":
156
-
sketch_name, remainder = lsnap(remainder)
157
-
158
-
try:
159
-
pyt_print(f"loading sketch \"{sketch_name}\"", mode="info")
160
-
module_name = f"sketch.{sketch_name}"
161
-
if module_name in sys.modules:
162
-
del sys.modules[module_name]
163
-
sketch = import_module(module_name)
164
-
except ModuleNotFoundError:
165
-
pyt_print.indented().trace()
166
-
pyt_print("no such sketch", mode="error", indent=4).blank()
167
-
continue
168
-
except KeyboardInterrupt:
169
-
pyt_print("aborted", mode="info").blank()
170
-
continue
171
-
except:
172
-
pyt_print.indented().trace()
173
-
continue
174
-
175
-
sources = { name : inspect.getsource(member)
176
-
for name, member in inspect.getmembers(sketch)
177
-
if inspect.isfunction(member) and member.__module__ == module_name
178
-
}
179
-
180
-
log = pyt_print.tag(sketch_name).mode("info")
181
-
182
-
t0 = perf_counter()
183
-
if hasattr(sketch, "persistent"):
184
-
if not handle_persistent(sketch.persistent, sketch.__dict__, log, sources):
185
-
log.blank()
186
-
continue
187
-
188
-
sketch.__dict__.update(persistent_state)
189
-
190
-
run_dir = time.strftime(f"%d.%m.%Y/{sketch_name}_t%H.%M.%S")
191
-
sketch.__dict__["run_dir"] = run_dir
192
-
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
193
-
194
-
shutil.copy(sketch.__file__, f"out/{run_dir}/{sketch_name}.py")
195
-
196
-
with open(f"out/{run_dir}/.snakepyt", "w") as metadata:
197
-
metadata.write(f"snakepyt version {snakepyt_version[0]}.{snakepyt_version[1]}\n")
198
-
199
-
if hasattr(sketch, "init"):
200
-
if hasattr(sketch, "final"):
201
-
finalizer = sketch.final.__code__
202
-
else:
203
-
finalizer = None
204
-
try:
205
-
run(sketch.init, None, (), sketch.__dict__, log, sources, finalizer)
206
-
except KeyboardInterrupt:
207
-
pyt_print.blank().log("aborted", mode="info").blank()
208
-
continue
209
-
else:
210
-
log("sketch has no init function", mode="error", indent=4)
211
-
#run(None, settings_functions, (), sketch.__dict__)
212
-
213
-
log(f"finished all runs in {perf_counter() - t0:.3f}s")
214
-
+95
sketch/old/pre_snakepyt/abelian_sandpile.py
···
1
+
import math
2
+
import random
3
+
import time
4
+
from pathlib import Path
5
+
6
+
import torch
7
+
8
+
from util import *
9
+
10
+
@settings
11
+
def _s():
12
+
scale = 2 ** 10
13
+
w = scale + 1
14
+
h = scale + 1
15
+
16
+
iterations = 500000
17
+
debug_modulus = 10000
18
+
19
+
offsets = [[-n,0] for n in range(17)] + [[n,0] for n in range(17)] + [[0,17]]
20
+
drop_points = [[h//2,w//2+n] for n in range(17)]
21
+
22
+
wrap_vertical = False
23
+
wrap_horizontal = True
24
+
25
+
name = "sand"
26
+
out_file = ""
27
+
28
+
device="cuda"
29
+
ctype=torch.cdouble
30
+
dtype=torch.double
31
+
32
+
@ifmain(__name__, _s)
33
+
@timed
34
+
def _main(settings):
35
+
globals().update(settings.get())
36
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
37
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
38
+
with open(f"out/{run_dir}/settings.py", "w") as f:
39
+
f.write(settings.src)
40
+
torch.set_default_device(device)
41
+
torch.set_default_dtype(dtype)
42
+
dim = [h, w]
43
+
44
+
grid = torch.zeros(dim, dtype=torch.int)
45
+
46
+
max_c = 500
47
+
c_total = 0
48
+
last_c_total = 500
49
+
50
+
n = len(offsets)
51
+
52
+
for iteration in range(iterations):
53
+
last = torch.clone(grid)
54
+
for p in drop_points:
55
+
#grid[p[0],p[1]] += 1
56
+
grid[p[0],p[1]] = n
57
+
# todo: make this index_put_
58
+
peaks = (grid >= n).int()
59
+
c = 0
60
+
while torch.count_nonzero(peaks):
61
+
c += 1
62
+
63
+
shift = -(n * peaks)
64
+
for offset in offsets:
65
+
p_shift = torch.roll(peaks, offset, dims=[0,1])
66
+
if offset[0] < 0 and not wrap_vertical:
67
+
p_shift[offset[0]:] = 0
68
+
elif not wrap_vertical:
69
+
p_shift[:offset[0]] = 0
70
+
if offset[1] < 0 and not wrap_horizontal:
71
+
p_shift[:,offset[1]:] = 0
72
+
elif not wrap_horizontal:
73
+
p_shift[:,:offset[1]] = 0
74
+
shift += p_shift
75
+
grid += shift
76
+
77
+
78
+
peaks = (grid >= n).int()
79
+
c_total += c
80
+
if c > 20:
81
+
s = "*" if c >= max_c else ""
82
+
print(f"{iteration}: {c}{s}")
83
+
if c > (0.98 * max_c):
84
+
msave(grid / (n-1), f"{run_dir}/{out_file}_{iteration}_{c}")
85
+
msave(last / (n-1), f"{run_dir}/{out_file}_{iteration}_prev")
86
+
if c > max_c:
87
+
max_c = c
88
+
if iteration % debug_modulus == 0:
89
+
msave(grid / (n-1), f"{run_dir}/{out_file}_{iteration}_")
90
+
if c_total > (last_c_total * 2):
91
+
msave(grid / (n-1), f"{run_dir}/{out_file}_{iteration}_ct{c_total}")
92
+
last_c_total = c_total
93
+
94
+
msave(grid / (n-1), out_file)
95
+
+235
sketch/old/pre_snakepyt/blus.py
···
1
+
import time
2
+
from pathlib import Path
3
+
from random import random
4
+
5
+
import torch
6
+
7
+
from util import *
8
+
9
+
type fp_range = tuple[float, float]
10
+
type fp_region2 = tuple[fp_range, fp_range]
11
+
type fp_coords2 = tuple[float, float]
12
+
type hw = tuple[int, int]
13
+
type region_mapping = tuple[fp_region2, hw]
14
+
15
+
@settings
16
+
def _s():
17
+
import math
18
+
name = "blus"
19
+
device = "cuda"
20
+
t_real = torch.double
21
+
t_complex = torch.cdouble
22
+
23
+
tau = 3.14159265358979323 * 2
24
+
25
+
flow = 0.01
26
+
ebb = 0 #1 - (1 / 1000)
27
+
randomize = False
28
+
random_range = tau #/ 50
29
+
rescaling = False
30
+
show_gridlines = False
31
+
32
+
particle_count = 1000000
33
+
34
+
iterations = 5000
35
+
36
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
37
+
scale_power = 11
38
+
scale = 2 ** scale_power
39
+
40
+
origin = 0, 0
41
+
span = 5, 5
42
+
43
+
stretch = 1, 1
44
+
45
+
zooms = [
46
+
]
47
+
48
+
def blus(a, b):
49
+
theta_a = a.angle()
50
+
return torch.polar(a.abs() + b.abs() * torch.cos(b.angle() - theta_a), theta_a)
51
+
52
+
@ifmain(__name__, _s)
53
+
@timed
54
+
def run(settings):
55
+
globals().update(settings.get())
56
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
57
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
58
+
Path("out/" + run_dir + "/aggregate").mkdir(parents=True, exist_ok=True)
59
+
Path("out/" + run_dir + "/frame").mkdir(parents=True, exist_ok=True)
60
+
61
+
global span
62
+
63
+
x_min = origin[0] - (span[0] / 2)
64
+
y_min = origin[1] - (span[1] / 2)
65
+
66
+
for ((xa, xb), (ya, yb)) in zooms:
67
+
x_min += span[0] * xa
68
+
y_min += span[1] * ya
69
+
span = span[0] * (xb - xa), span[1] * (yb - ya)
70
+
71
+
x_max = x_min + span[0]
72
+
y_max = y_min + span[1]
73
+
74
+
aspect = span[0] * stretch[0] / (span[1] * stretch[1])
75
+
76
+
if aspect < 1:
77
+
h = scale
78
+
w = int(scale * aspect)
79
+
else:
80
+
w = scale
81
+
h = int(scale / aspect)
82
+
83
+
x_range = (x_min, x_max)
84
+
y_range = (y_min, y_max)
85
+
region = (x_range, y_range)
86
+
mapping = (region, (h,w))
87
+
88
+
p_positions = (torch.rand([particle_count], device=device, dtype=t_complex) - (0.5 + 0.5j)) * 4
89
+
#p_positions.imag = torch.linspace(0, tau, particle_count)
90
+
#p_positions.real = torch.linspace(0, 1, particle_count)
91
+
#p_positions = torch.polar(p_positions.real, p_positions.imag)
92
+
93
+
p_colors = torch.ones([particle_count,3], device=device, dtype=t_real)
94
+
color_rotation = torch.linspace(0, tau / 4, particle_count)
95
+
96
+
p_colors[:,0] = p_positions.real / 4 + 0.5
97
+
p_colors[:,2] = p_positions.imag / 4 + 0.5
98
+
99
+
#p_colors[:,0] = torch.cos(color_rotation)
100
+
p_colors[:,1] *= 0
101
+
#p_colors[:,2] *= 0
102
+
#p_colors[:,2] = torch.sin(color_rotation)
103
+
104
+
canvas = torch.zeros([3, h, w], device=device, dtype=t_real)
105
+
scratch = torch.zeros([h, w, 3], device=device, dtype=t_real)
106
+
gridlines = torch.zeros([h,w], device=device)
107
+
108
+
def project(p):
109
+
return torch.view_as_real(p).permute(1,0)
110
+
111
+
ones = torch.ones_like(p_positions)
112
+
global direction
113
+
direction = flow * torch.polar(ones.real, 1 * tau * ones.real)
114
+
def next_positions(p):
115
+
global direction
116
+
#return blus(p, ones) - ones
117
+
if randomize:
118
+
direction = flow * torch.polar(ones.real, (random() * random_range - random_range / 2) * ones.real)
119
+
result = blus(p, direction) - direction * ebb
120
+
res_abs = result.abs()
121
+
if rescaling:
122
+
result = torch.polar(res_abs / res_abs.max(), result.angle())
123
+
return result
124
+
125
+
gridlines[(h-1)//2,:] = 1
126
+
gridlines[:,(w-1)//2] = 1
127
+
128
+
for line in range(1,6):
129
+
pos = line - y_min
130
+
pos *= (h-1) / (y_max - y_min)
131
+
try:
132
+
gridlines[math.floor(pos), :] = 0.8
133
+
except:
134
+
pass
135
+
for line in range(1,6):
136
+
pos = -line - y_min
137
+
pos *= (h-1) / (y_max - y_min)
138
+
try:
139
+
gridlines[math.floor(pos), :] = 0.8
140
+
except:
141
+
pass
142
+
for line in range(1,6):
143
+
pos = line - x_min
144
+
pos *= (w-1) / (x_max - x_min)
145
+
try:
146
+
gridlines[:,math.floor(pos)] = 0.8
147
+
except:
148
+
pass
149
+
for line in range(1,6):
150
+
pos = -line - x_min
151
+
pos *= (w-1) / (x_max - x_min)
152
+
try:
153
+
gridlines[:,math.floor(pos)] = 0.8
154
+
except:
155
+
pass
156
+
for line in range(1,10):
157
+
pos = -(line / 10) - y_min
158
+
pos *= (h-1) / (y_max - y_min)
159
+
try:
160
+
gridlines[math.floor(pos), :] = 0.3
161
+
except:
162
+
pass
163
+
for line in range(1,10):
164
+
pos = (line / 10) - y_min
165
+
pos *= (h-1) / (y_max - y_min)
166
+
try:
167
+
gridlines[math.floor(pos), :] = 0.3
168
+
except:
169
+
pass
170
+
171
+
def insert_at_coords(coords, values, target, mapping: region_mapping):
172
+
(region, hw) = mapping
173
+
(xrange, yrange) = region
174
+
(h,w) = hw
175
+
(x_min, x_max) = xrange
176
+
(y_min, y_max) = yrange
177
+
178
+
mask = torch.ones([particle_count], device=device)
179
+
mask *= (coords[1] >= x_min) * (coords[1] <= x_max)
180
+
mask *= (coords[0] >= y_min) * (coords[0] <= y_max)
181
+
in_range = mask.nonzero().squeeze()
182
+
183
+
# TODO: combine coord & value tensors so there's only one index_select necessary
184
+
coords_filtered = torch.index_select(coords.permute(1,0), 0, in_range)
185
+
values_filtered = torch.index_select(values, 0, in_range)
186
+
187
+
coords_filtered[:,1] -= x_min
188
+
coords_filtered[:,1] *= (w-1) / (x_max - x_min)
189
+
coords_filtered[:,0] -= y_min
190
+
coords_filtered[:,0] *= (h-1) / (y_max - y_min)
191
+
indices = coords_filtered.long()
192
+
193
+
target.index_put_((indices[:,0],indices[:,1]), values_filtered, accumulate=True)
194
+
195
+
196
+
for iteration in range(iterations):
197
+
p_projected = project(p_positions).clone()
198
+
199
+
if iteration % 1 == 0:
200
+
201
+
scratch *= 0
202
+
insert_at_coords(p_projected, p_colors, scratch, mapping)
203
+
canvas += scratch.permute(2,0,1)
204
+
205
+
206
+
temp = canvas.clone()
207
+
#for d in range(3):
208
+
# temp[d] -= temp[d].mean()
209
+
# temp[d] /= 8 * temp[d].std()
210
+
# temp[d] -= temp[d].min()
211
+
212
+
temp /= temp.max()
213
+
214
+
save(1 - temp.sqrt().sqrt(), f"{run_dir}/aggregate/{iteration:06d}")
215
+
#scratch /= scratch.max()
216
+
#scratch = scratch.sqrt().sqrt()
217
+
if show_gridlines:
218
+
scratch[:,:,1] += gridlines
219
+
scratch[:,:,2] += gridlines
220
+
save(scratch.permute(2,0,1), f"{run_dir}/frame/{iteration:06d}")
221
+
222
+
p_positions = next_positions(p_positions)
223
+
224
+
#for d in range(3):
225
+
# canvas[d] -= canvas[d].mean()
226
+
# canvas[d] /= 8 * canvas[d].std()
227
+
# canvas[d] -= canvas[d].min()
228
+
229
+
canvas /= canvas.max()
230
+
save(1 - canvas, f"{run_dir}/final")
231
+
232
+
with open(f"out/{run_dir}/settings.py", "w") as f:
233
+
f.write(settings.src)
234
+
torch.set_default_device(device)
235
+
+202
sketch/old/pre_snakepyt/fluid.py
···
1
+
import time
2
+
from pathlib import Path
3
+
4
+
import torch
5
+
from torch.nn.functional import conv2d as convolve
6
+
7
+
from util import *
8
+
9
+
@settings
10
+
def _s():
11
+
scale = 2**11
12
+
w = scale
13
+
h = scale
14
+
15
+
max_iterations = 100000
16
+
save_every = 100
17
+
18
+
v_diffusion_rate = 2000
19
+
m_diffusion_rate = 0.5
20
+
m_timescale = 0.01
21
+
timescale = 0.001
22
+
23
+
name = "fluid"
24
+
25
+
device = "cuda"
26
+
27
+
28
+
diffuse_kernel = torch.tensor([[[[0,1,0],[1,0,1],[0,1,0]]]], dtype=torch.float, device="cuda")
29
+
project_kernel_x = torch.tensor([[[[0,0,0],[-1,0,1],[0,0,0]]]], dtype=torch.float, device="cuda")
30
+
project_kernel_y = torch.tensor([[[[0,-1,0],[0,0,0],[0,1,0]]]], dtype=torch.float, device="cuda")
31
+
32
+
def continuous_boundary(field):
33
+
field[0] = field[1]
34
+
field[-1] = field[-2]
35
+
field[:,0] = field[:,1]
36
+
field[:,-1] = field[:,-2]
37
+
field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])
38
+
39
+
def opposed_v_boundary(field):
40
+
field[0] = field[1]
41
+
field[-1] = field[-2]
42
+
field[:,0] = -field[:,1]
43
+
field[:,-1] = -field[:,-2]
44
+
field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])
45
+
46
+
def opposed_h_boundary(field):
47
+
field[0] = -field[1]
48
+
field[-1] = -field[-2]
49
+
field[:,0] = field[:,1]
50
+
field[:,-1] = field[:,-2]
51
+
field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])
52
+
53
+
54
+
def diffuse(field, rate, set_boundary, dt, h, w):
55
+
a = dt * rate
56
+
result = torch.clone(field)
57
+
if field.shape != (h, w):
58
+
print("bad field shape in diffuse")
59
+
for n in range(20):
60
+
convolution = a * convolve(result.unsqueeze(0), diffuse_kernel, bias=None, padding=[0], stride=[1])[0]
61
+
result[1:h-1,1:w-1] = field[1:h-1,1:w-1] + convolution
62
+
result /= 1 + 4 * a
63
+
set_boundary(result)
64
+
#result = result * ~border_mask[0] + field * border_mask[0]
65
+
return result
66
+
67
+
def advect(field, velocities, dt, h, w):
68
+
dth, dtw = dt, dt
69
+
inds_x = torch.arange(1,w-1).repeat(h-2,1).float()
70
+
inds_y = torch.arange(1,h-1).repeat(w-2,1).t().float()
71
+
inds_x += dtw * velocities[1,1:h-1,1:w-1]
72
+
inds_y += dth * velocities[0,1:h-1,1:w-1]
73
+
inds_x = inds_x.clamp(1.5, w - 2.5)
74
+
inds_y = inds_y.clamp(1.5, h - 2.5)
75
+
inds_x_i = inds_x.int()
76
+
inds_y_i = inds_y.int()
77
+
inds_x -= inds_x_i
78
+
inds_y -= inds_y_i
79
+
inds_x_inv = 1 - inds_x
80
+
inds_y_inv = 1 - inds_y
81
+
inds_x_i_next = inds_x_i + 1
82
+
inds_y_i_next = inds_y_i + 1
83
+
inds_x_all = torch.stack([inds_x_i, inds_x_i_next, inds_x_i, inds_x_i_next])
84
+
inds_y_all = torch.stack([inds_y_i, inds_y_i, inds_y_i_next, inds_y_i_next])
85
+
if field.shape[0] == 1:
86
+
values = torch.cat([field[:,1:h-1,1:w-1] * inds_x_inv * inds_y_inv,
87
+
field[:,1:h-1,1:w-1] * inds_x * inds_y_inv,
88
+
field[:,1:h-1,1:w-1] * inds_x_inv * inds_y,
89
+
field[:,1:h-1,1:w-1] * inds_x * inds_y])
90
+
res = torch.zeros_like(field[0])
91
+
res.index_put_((inds_y_all, inds_x_all), values, accumulate=True)
92
+
continuous_boundary(res)
93
+
return res.unsqueeze(0)
94
+
else:
95
+
values = torch.stack([field[:,1:h-1,1:w-1] * inds_x_inv * inds_y_inv,
96
+
field[:,1:h-1,1:w-1] * inds_x * inds_y_inv,
97
+
field[:,1:h-1,1:w-1] * inds_x_inv * inds_y,
98
+
field[:,1:h-1,1:w-1] * inds_x * inds_y])
99
+
res = torch.zeros_like(field)
100
+
res[0].index_put_((inds_y_all, inds_x_all), values[:,0,:,:], accumulate=True)
101
+
res[1].index_put_((inds_y_all, inds_x_all), values[:,1,:,:], accumulate=True)
102
+
opposed_h_boundary(res[1])
103
+
opposed_v_boundary(res[0])
104
+
return res
105
+
106
+
def project(field, h, w):
107
+
hx = -1# / w
108
+
hy = -1# / h
109
+
divergence = convolve(field[1].unsqueeze(0), project_kernel_x, bias=None, stride=[1], padding=[0])[0] * hx
110
+
divergence += convolve(field[0].unsqueeze(0), project_kernel_y, bias=None, stride=[1], padding=[0])[0] * hy
111
+
divergence *= 0.5
112
+
continuous_boundary(divergence)
113
+
p = torch.zeros_like(field[0])
114
+
for i in range(40):
115
+
p[1:h-1,1:w-1] = (divergence + convolve(p.unsqueeze(0), diffuse_kernel, bias=None, stride=[1], padding=[0])[0]) / 4
116
+
continuous_boundary(p)
117
+
field[1,1:h-1,1:w-1] += 0.5 * convolve(p.unsqueeze(0), project_kernel_x, bias=None, stride=[1], padding=[0])[0] / hx
118
+
field[0,1:h-1,1:w-1] += 0.5 * convolve(p.unsqueeze(0), project_kernel_y, bias=None, stride=[1], padding=[0])[0] / hy
119
+
opposed_h_boundary(field[1])
120
+
opposed_v_boundary(field[0])
121
+
122
+
@ifmain(__name__, _s)
123
+
@timed
124
+
def _main(settings):
125
+
globals().update(settings.get())
126
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
127
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
128
+
with open(f"out/{run_dir}/settings.py", "w") as f:
129
+
f.write(settings.src)
130
+
torch.set_default_device(device)
131
+
132
+
133
+
#velocities = torch.zeros([2, h, w])
134
+
upsample = torch.nn.Upsample(size=[h, w], mode='bilinear')
135
+
velocities = torch.randn([2, h//32, w//32]) * 1 #* 100
136
+
densities = torch.ones([1, h, w]) * 0.1
137
+
138
+
cg = cgrid(h, w, 0 + 0j, 4 + 4j, dtype=torch.float, ctype=torch.cfloat)
139
+
140
+
velocities = upsample(velocities.unsqueeze(0))[0]#cg.real - cg.imag * torch.sin(cg.real)
141
+
#velocities[1] = 0#-cg.imag - cg.real * torch.cos(cg.imag)
142
+
143
+
#for i in range(3):
144
+
# for j in range(3):
145
+
# w7 = w // 7
146
+
# h7 = h // 7
147
+
# densities[0][(2*j+1)*h7:(2*j+2)*h7][(2*i+1)*w7:(2*i+2)*w7] = 1
148
+
# print((2*j+1)*h7)
149
+
# print((2*j+2)*h7)
150
+
151
+
152
+
#w7 = w//7
153
+
#h7 = h//7
154
+
#for i in range(w7):
155
+
# for j in range(h7):
156
+
# for a in range(3):
157
+
# for b in range(3):
158
+
# densities[0][j + h7 * (2 * a + 1)][w7 * (2 * b + 1) + i] = 1.0
159
+
160
+
initial = torch.cat((velocities * 0.5 + 0.5, densities), dim=0)
161
+
save(initial, f"{run_dir}/initial")
162
+
del initial
163
+
164
+
border_mask = torch.ones([2,h,w], dtype=torch.int)
165
+
border_mask[:,1:h-1,1:w-1] = 0;
166
+
167
+
for iteration in range(max_iterations):
168
+
velocities[0,3 * h//4 - 10:(3*h//4),w//2-10:w//2+10] = -10 * 40
169
+
velocities[1,3 * h//4:(3*h//4)+10,w//2-10:w//2+10] = -9 * 40
170
+
velocities[0,h//4:(h//4)+10,w//2-3:w//2+3] = 10 * 40
171
+
velocities[1,h//4:(h//4)+10,w//2-3:w//2+3] = 20 * 40
172
+
velocities[0] += 0.01
173
+
#densities[0,3*h//4-10:(3*h//4),w//2-9:w//2+9] += 0.1
174
+
#densities[0,h//4:(h//4)+10,w//2+30:w//2+55] += 0.1
175
+
densities += 0.001
176
+
velocities[0] = diffuse(velocities[0], v_diffusion_rate, opposed_h_boundary, timescale, h, w)
177
+
velocities[1] = diffuse(velocities[1], v_diffusion_rate, opposed_v_boundary, timescale, h, w)
178
+
project(velocities, h, w)
179
+
velocities = advect(velocities, velocities, timescale, h, w)
180
+
project(velocities, h, w)
181
+
densities = diffuse(densities[0], m_diffusion_rate, continuous_boundary, m_timescale, h, w).unsqueeze(0)
182
+
densities = advect(densities, velocities, m_timescale, h, w)
183
+
densities *= 0.99
184
+
if iteration % save_every == 0:
185
+
#res = torch.cat((velocities * 0.5 + 0.5, densities), dim=0)
186
+
#save(res, f"{run_dir}/{iteration}")
187
+
msave(densities[0], f"{run_dir}/_{iteration}")
188
+
189
+
final = torch.cat((velocities * 0.5 + 0.5, densities), dim=0)
190
+
save(final, f"{run_dir}/final")
191
+
192
+
193
+
194
+
195
+
196
+
197
+
198
+
199
+
200
+
201
+
202
+
+277
sketch/old/pre_snakepyt/fluid_clean.py
···
1
+
# pytorch adaptation of the methods described here:
2
+
# https://www.dgp.toronto.edu/public_user/stam/reality/Research/pdf/GDC03.pdf (overall design)
3
+
# https://www.karlsims.com/fluid-flow.html (divergence clearing step)
4
+
5
+
# only dependencies are pillow and pytorch
6
+
7
+
import math
8
+
import time
9
+
from pathlib import Path
10
+
from PIL import Image
11
+
12
+
import torch
13
+
from torch.nn.functional import conv2d
14
+
15
+
# settings
16
+
17
+
result_directory = "out"
18
+
Path(result_directory).mkdir(parents=True, exist_ok=True)
19
+
Path(result_directory + "/velocities").mkdir(parents=True, exist_ok=True)
20
+
Path(result_directory + "/densities").mkdir(parents=True, exist_ok=True)
21
+
22
+
scale = 10
23
+
w = 2**scale
24
+
h = 2**scale
25
+
26
+
max_iterations = 100000
27
+
save_every = 1
28
+
29
+
velocity_diffusion_rate = 20
30
+
density_diffusion_rate = None
31
+
density_timescale = 0.005
32
+
timescale = 0.01
33
+
34
+
torch.set_default_device("cuda")
35
+
36
+
diffusion_solver_steps = 10
37
+
divergence_clearing_steps = 20
38
+
39
+
40
+
# save images
41
+
42
+
def monochrome_save(tensor, filename):
43
+
"""save a 2d tensor of shape (h,w) as a monochrome image"""
44
+
scaled = torch.clone(tensor).clamp_(0,1).mul_(255).round().type(torch.uint8)
45
+
# copy to make 3 color channels
46
+
rearranged = scaled.unsqueeze(2).expand(-1, -1, 3).cpu().numpy()
47
+
Image.fromarray(rearranged).save(f"{result_directory}/{filename}.png")
48
+
49
+
def save(tensor, filename):
50
+
"""save a 3d tensor with shape (3,h,w) as an rgb image"""
51
+
scaled = torch.clone(tensor).clamp_(0, 1).mul_(255).round().type(torch.uint8)
52
+
rearranged = scaled.permute(1, 2, 0).cpu().numpy()
53
+
Image.fromarray(rearranged).save(f"{result_directory}/{filename}.png")
54
+
55
+
56
+
57
+
58
+
59
+
# convolution
60
+
61
+
# the backbone of this simulation is a handful of discrete convolutions
62
+
# detailed definition here: https://en.wikipedia.org/wiki/Convolution#Discrete_convolution
63
+
64
+
# roughly: we have a discrete field and a "kernel".
65
+
# we look at a small window of values in the field, multiply each value there by a number,
66
+
# and sum the results. the kernel describes what number to multiply each value in the window by.
67
+
# we do this for every possible window, centered on every point we can fit the window around,
68
+
# skipping points where the window would hang off the edge of the field. thus, we end up with
69
+
# a result that's slightly smaller than the field
70
+
71
+
def convolve(field, kernel):
72
+
# conv2d works in batches & we only ever need to do a 1-element batch
73
+
# "unsqueeze" is pytorch's absolutely bizarre term for wrapping a tensor with another dimension. like going from [1,2] to [[1,2]]
74
+
return conv2d(field.unsqueeze(0), kernel, bias=None, padding=[0], stride=[1])[0]
75
+
76
+
# convolution kernels:
77
+
78
+
# total of values of immediate neighbors in all 4 cardinal directions
79
+
diffusion_kernel = torch.tensor([[[
80
+
[0, 1, 0],
81
+
[1, 0, 1],
82
+
[0, 1, 0]
83
+
]]], dtype=torch.float)
84
+
85
+
# gradient of divergence in the x direction
86
+
x_div_kernel = torch.tensor([[[
87
+
[ 1, 2, 1],
88
+
[-2,-4,-2],
89
+
[ 1, 2, 1]
90
+
]]], dtype=torch.float)
91
+
92
+
div_opp_kernel = torch.tensor([[[
93
+
[ 1, 0,-1],
94
+
[ 0, 0, 0],
95
+
[-1, 0, 1]
96
+
]]], dtype=torch.float)
97
+
98
+
# gradient of divergence in the y direction
99
+
y_div_kernel = torch.tensor([[[
100
+
[ 1,-2, 1],
101
+
[ 2,-4, 2],
102
+
[ 1,-2, 1]
103
+
]]], dtype=torch.float)
104
+
105
+
# various boundary conditions
106
+
107
+
def continuous_boundary(field):
108
+
"""set the border values of the provided discretized 2d scalar field (that is, a 2d array of numbers) to be equal to their inner neighbors (& average the corner points)"""
109
+
field[0] = field[1]
110
+
field[-1] = field[-2]
111
+
field[:,0] = field[:,1]
112
+
field[:,-1] = field[:,-2]
113
+
# this is doing four indexing operations at once, getting all four corners in one go
114
+
field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])
115
+
116
+
def opposed_vertical_boundary(field):
117
+
"""set the border values of a discretized 2d field to be equal to their inner neighbors horizontally but opposite their neighbors vertically. average at corners"""
118
+
field[0] = field[1]
119
+
field[-1] = field[-2]
120
+
field[:,0] = -field[:,1]
121
+
field[:,-1] = -field[:,-2]
122
+
field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])
123
+
124
+
def opposed_horizontal_boundary(field):
125
+
"""set border values equal vertically, opposite horizontally, average at corners"""
126
+
field[0] = -field[1]
127
+
field[-1] = -field[-2]
128
+
field[:,0] = field[:,1]
129
+
field[:,-1] = field[:,-2]
130
+
field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])
131
+
132
+
133
+
134
+
def diffuse(field, rate, boundary_condition, timescale):
135
+
"""diffuse the scalar field. basically means repeatedly applying a blur to it"""
136
+
a = timescale * rate
137
+
result = torch.clone(field)
138
+
for n in range(diffusion_solver_steps):
139
+
# scaled sum of surrounding points
140
+
convolution = a * convolve(result, diffusion_kernel)
141
+
# convolution operation doesn't produce output for the border rows & columns, thus the "1:n-1" indexing
142
+
result[1:h-1,1:w-1] = field[1:h-1,1:w-1] + convolution
143
+
result /= 1 + 4 * a
144
+
boundary_condition(result)
145
+
return result
146
+
147
+
# generate indices outside the function for a tiny performance improvement
148
+
indices_x = torch.arange(1,w-1,dtype=torch.float).repeat(h-2,1)
149
+
indices_y = torch.arange(1,h-1,dtype=torch.float).repeat(w-2,1).t()
150
+
indices = torch.stack((indices_y, indices_x))
151
+
152
+
# defining these here to reuse the same gpu memory for each advect() call
153
+
# TODO do the same for the rest of the tensors
154
+
offsets = indices.clone()
155
+
inverse_offsets = indices.clone()
156
+
indices_int = indices.int()
157
+
next_indices = indices_int.clone()
158
+
159
+
160
+
# advection
161
+
# this is a point where i deviate from the original paper.
162
+
# i take the velocity at every point, and add a scaled version of that to the index at each point
163
+
# to find where that velocity will carry whatever is being advected along it in one timestep.
164
+
# that will be a point that i decompose into the integer component & fractional component. like (3.4, 1.2) -> (3,1) + (0.4, 0.2)
165
+
# the point itself will be somewhere between two indices on each axis. the integer component determines the first of these two
166
+
# indices, and the fractional component determines which of the points it's closer to
167
+
168
+
# the paper i based this on did this weird backward version of that, where they use the negative velocity at each point to
169
+
# select a point & interpolate the values of the field at the surrounding points
170
+
171
+
def advect(field, velocities, timescale):
172
+
"""given any field, and a velocity field of the same height & width, move the values in the field a small distance in the direction of the velocity field"""
173
+
offsets[...] = indices
174
+
offsets.add_(timescale * velocities[:,1:h-1,1:w-1])
175
+
offsets[1].clamp_(1.5, w - 2.5)
176
+
offsets[0].clamp_(1.5, h - 2.5)
177
+
indices_int[...] = offsets.int()
178
+
offsets.sub_(indices_int)
179
+
inverse_offsets[...] = 1 - offsets
180
+
next_indices[...] = indices_int + 1
181
+
inds_x_all = torch.stack([indices_int[1], next_indices[1], indices_int[1], next_indices[1]])
182
+
inds_y_all = torch.stack([indices_int[0], indices_int[0], next_indices[0], next_indices[0]])
183
+
values = torch.stack([field[:,1:h-1,1:w-1] * inverse_offsets[1] * inverse_offsets[0],
184
+
field[:,1:h-1,1:w-1] * offsets[1] * inverse_offsets[0],
185
+
field[:,1:h-1,1:w-1] * inverse_offsets[1] * offsets[0],
186
+
field[:,1:h-1,1:w-1] * offsets[1] * offsets[0]])
187
+
res = torch.zeros_like(field)
188
+
res[0].index_put_((inds_y_all, inds_x_all), values[:,0,:,:], accumulate=True)
189
+
if field.shape[0] == 1:
190
+
continuous_boundary(res[0])
191
+
else:
192
+
res[1].index_put_((inds_y_all, inds_x_all), values[:,1,:,:], accumulate=True)
193
+
opposed_horizontal_boundary(res[1])
194
+
opposed_vertical_boundary(res[0])
195
+
return res
196
+
197
+
def clear_divergence(field):
198
+
opposed_horizontal_boundary(field[0])
199
+
opposed_vertical_boundary(field[1])
200
+
for i in range(divergence_clearing_steps):
201
+
x_op = convolve(field[0], div_opp_kernel)
202
+
y_op = convolve(field[1], div_opp_kernel)
203
+
field[1,1:h-1,1:w-1] += (convolve(field[1], y_div_kernel) + x_op) / 8
204
+
field[0,1:h-1,1:w-1] += (convolve(field[0], x_div_kernel) + y_op) / 8
205
+
opposed_horizontal_boundary(field[0])
206
+
opposed_vertical_boundary(field[1])
207
+
208
+
209
+
210
+
211
+
# initialize a small field of random velocities, then upscale it interpolating between those velocities,
212
+
# so that the variation in velocity is somewhat smooth instead of pure per-pixel noise,
213
+
# which would lead to a less interesting simulation
214
+
velocities = torch.randn([2, h//32, w//32]) * 120
215
+
upsample = torch.nn.Upsample(size=[h, w], mode='bilinear')
216
+
velocities = upsample(velocities.unsqueeze(0))[0]
217
+
218
+
# initialize "dye" densities to a uniform field
219
+
densities = torch.ones([1, h, w]) * 0.3
220
+
# add lines
221
+
#for x in range(20):
222
+
# densities[0,:,x*w//20] += 0.8
223
+
#for y in range(20):
224
+
# densities[0,y*h//20,:] += 0.8
225
+
226
+
frame_index = 0
227
+
228
+
last_frame = time.perf_counter()
229
+
230
+
limit = 1 / (timescale * 2 * math.sqrt(2))
231
+
232
+
# core loop of the simulation
233
+
for iteration in range(max_iterations):
234
+
# add a tiny bit of "dye" to the fluid at all points
235
+
densities.add_(0.003)
236
+
#if iteration % (30 * save_every) == 0:
237
+
# for x in range(20):
238
+
# densities[0,:,x*w//20] += 0.3
239
+
# for y in range(20):
240
+
# densities[0,y*h//20,:] += 0.3
241
+
242
+
243
+
#velocities[1,h//12+h//3:h//12+2*h//3,w//8] += 50 + 50 * math.sin(iteration * 0.005)
244
+
#velocities[1,h//3-h//12:2*h//3-h//12,7*w//8] -= 20 + 70 * math.sin(iteration * 0.01)
245
+
#velocities[0,7*h//8,4*w//6:5*w//6] -= 60 + 50 * math.sin(iteration * 0.03)
246
+
247
+
# diffuse the velocities in both directions, enforcing a boundary
248
+
# condition that maintains a constant inward velocity matching the outward velocity along the edges. that is, a wall
249
+
velocities[0] = diffuse(velocities[0], velocity_diffusion_rate, opposed_horizontal_boundary, timescale)
250
+
velocities[1] = diffuse(velocities[1], velocity_diffusion_rate, opposed_vertical_boundary, timescale)
251
+
clear_divergence(velocities)
252
+
velocities.clamp_(-limit, limit)
253
+
254
+
# let the velocity field flow along itself
255
+
velocities = advect(velocities, velocities, timescale)
256
+
clear_divergence(velocities)
257
+
258
+
# diffuse the densities & let them flow along the velocity field
259
+
if density_diffusion_rate is not None:
260
+
densities = diffuse(densities[0], density_diffusion_rate, continuous_boundary, density_timescale).unsqueeze(0)
261
+
densities = advect(densities, velocities, density_timescale)
262
+
263
+
# remove a little density
264
+
densities.sub_(0.003)
265
+
densities.clamp_(0,1)
266
+
267
+
if iteration % save_every == 0:
268
+
frame_index += 1
269
+
frame_time = time.perf_counter() - last_frame
270
+
image = torch.cat((0.5 + 0.5 * velocities / torch.sqrt(velocities[0]**2 + velocities[1]**2), torch.zeros((1,h,w))), dim=0)
271
+
save(image, f"velocities/{frame_index:06d}")
272
+
monochrome_save(densities[0], f"densities/{frame_index:06d}")
273
+
274
+
print(f"frame {frame_index:06d}: {frame_time:06f}")
275
+
print(f"[{frame_time/save_every:06f}/it for {save_every} iterations]")
276
+
last_frame = time.perf_counter()
277
+
+30
sketch/old/pre_snakepyt/fluid_minimal.py
···
1
+
import torch as t;from PIL import Image as I;w,h,iters,save_every,r,dt,conv=2**9,2**9,100000,1,10,0.005,lambda f,k:t.nn.functional.conv2d(f.unsqueeze(0),k,bias=None,padding=[0],stride=[1])[0]
2
+
t.set_default_device("cuda");kd,kph,kpv=t.tensor([[[[0,1.0,0],[1,0,1],[0,1,0]]]]),t.tensor([[[[0,0,0],[1.0,0,-1],[0,0,0]]]]),t.tensor([[[[0,1.0,0],[0,0,0],[0,-1,0]]]])
3
+
def bd(f,lt=lambda t:(t[0],t[1])):f[(0,-1)],f[:,(0,-1)]=lt((f[(1,-2)],f[:,(1,-2)]));f[(0,0,-1,-1),(0,-1,0,-1)]=0.5*(f[(0,0,-2,-2),(1,-2,0,-1)]+f[(1,1,-1,-1),(0,-1,1,-2)])
4
+
ind,lim=t.stack((t.arange(1,h-1,dtype=t.float).repeat(w-2,1).t(),t.arange(1,w-1,dtype=t.float).repeat(h-2,1))),1/(dt*1.4142)
5
+
def adv(f, v):
6
+
off=ind.clone().add_(dt*v[:,1:h-1,1:w-1]);off[1].clamp_(1.5,w-2.5);off[0].clamp_(1.5,h-2.5);ind_int=off.int()
7
+
inv_off,next_ind=1-off.sub_(ind_int),ind_int+1;i=(t.stack([ind_int[0],ind_int[0],next_ind[0],next_ind[0]]),t.stack([ind_int[1],next_ind[1],ind_int[1],next_ind[1]]))
8
+
res,values=t.zeros_like(f),t.stack([f[:,1:h-1,1:w-1]*inv_off[1]*inv_off[0],f[:,1:h-1,1:w-1]*off[1]*inv_off[0],f[:,1:h-1,1:w-1]*inv_off[1]*off[0],f[:,1:h-1,1:w-1]*off[1]*off[0]])
9
+
res[0].index_put_(i,values[:,0,:,:],accumulate=True)
10
+
if f.shape[0]==1:bd(res[0])
11
+
else:res[1].index_put_(i,values[:,1,:,:],accumulate=True);bd(res[1],lambda t:(-t[0],t[1]));bd(res[0],lambda t:(t[0],-t[1]))
12
+
return res
13
+
def proj(f):
14
+
div,p=(conv(f[1],kph)+conv(f[0],kpv))*0.5,t.zeros_like(f[0]);bd(div)
15
+
for i in range(80):p[1:h-1,1:w-1]=(div+conv(p,kd))/4;bd(p)
16
+
f[1,1:h-1,1:w-1]+=0.5*conv(p,kph);f[0,1:h-1,1:w-1]+=0.5*conv(p,kpv);bd(f[1],lambda t:(-t[0],t[1]));bd(f[0],lambda t:(t[0],-t[1]))
17
+
v,d,i=t.nn.Upsample(size=[h,w],mode='bilinear')((t.randn([2,h//128,w//128])*50).unsqueeze(0))[0],t.ones([1,h,w])*0.1,1
18
+
d[0,:,tuple(x*w//20 for x in range(20))]+=0.8;d[0,tuple(x*h//20 for x in range(20)),:]+=0.8
19
+
for it in range(iters):
20
+
v.nan_to_num_(0);
21
+
d.add_(0.003);v[1,h//3:2*h//3,w//8].add_(40).clamp_(-lim,lim);v[0,1:h-1,1:w-1]=(v[0,1:h-1,1:w-1]+dt*r*conv(v[0],kd))/(1+4*dt*r)
22
+
v[1,1:h-1,1:w-1]=(v[1,1:h-1,1:w-1]+dt*r*conv(v[1],kd))/(1+4*dt*r);bd(v);proj(v);v=adv(v,v);proj(v);d=adv(d,v).sub_(0.003).clamp_(0,1)
23
+
print(f"{it}: {t.count_nonzero(t.isnan(v.view(-1))).item()} v nans")
24
+
if it%save_every == 0:
25
+
i+=1
26
+
I.fromarray(d[0].detach().clone().clamp_(0,1).mul_(255).round().type(t.uint8).unsqueeze(2).expand(-1,-1,3).cpu().numpy()).save(f"out/densities/{i:06d}.png")
27
+
#I.fromarray(t.cat((v.isnan(),t.zeros_like(v[0]).unsqueeze(0))).detach().clone().clamp_(0,1).mul_(255).round().type(t.uint8).permute(1,2,0).cpu().numpy()).save(f"out/velocities/nan_{i:06d}.png")
28
+
I.fromarray(t.cat(((0.5 + 0.5 * v / t.sqrt(v[0]**2 + v[1]**2)), t.zeros_like(v[0]).unsqueeze(0))).detach().clone().clamp_(0,1).mul_(255).round().type(t.uint8).permute(1,2,0).cpu().numpy()).save(f"out/velocities/dir_{i:06d}.png")
29
+
#I.fromarray((0.5 + 0.5 * (v[0]**2+v[1]**2) / (v[0]**2 + v[1]**2).abs().max()).detach().clone().clamp_(0,1).mul_(255).round().type(t.uint8).unsqueeze(2).expand(-1,-1,3).cpu().numpy()).save(f"out/velocities/mag_{i:06d}.png")
30
+
+2
sketch/old/pre_snakepyt/fluid_vids
+156
sketch/old/pre_snakepyt/fractal.py
···
1
+
import time
2
+
import torch
3
+
from PIL import Image
4
+
from util import *
5
+
6
+
import math
7
+
8
+
# Settings
9
+
10
+
w = 2**13
11
+
h = w
12
+
13
+
#x_range = [-1.8, -1.7]
14
+
#y_range = [-0.095, 0.005]
15
+
16
+
#ship
17
+
x_start = -1.8
18
+
x_span = 0.1
19
+
y_start = -0.095
20
+
y_span = 0.1
21
+
22
+
#x_start = -math.e / 2
23
+
#x_span = 1.54
24
+
#y_start = -2
25
+
#y_span = 2
26
+
27
+
alpha = 2.5
28
+
29
+
x_range = [x_start, x_start + x_span]
30
+
y_range = [y_start, y_start + y_span]
31
+
32
+
final_file = "ship_big"
33
+
34
+
device = "cuda"
35
+
ctype = torch.cfloat
36
+
dtype = torch.float
37
+
38
+
# how many random samples in each point
39
+
sample_ct = 40
40
+
noise_scale = x_span / (2 * w)
41
+
42
+
do_jitter = False
43
+
jitter_scale = 2.5 / w
44
+
jitter_chunk_size = 128
45
+
46
+
# how many iterations of the z_(n+1) = f(z_n) recurrence
47
+
iterations = 120
48
+
49
+
# threshold for defining escape
50
+
limit = 2.0
51
+
52
+
if __name__ == "__main__":
53
+
t0 = time.perf_counter()
54
+
55
+
z_init = torch.zeros([h, w], dtype=ctype, device=device)
56
+
c = torch.zeros([h, w], dtype=ctype, device=device)
57
+
j = torch.zeros([h, w], dtype=ctype, device=device)
58
+
j.imag = torch.ones([h, w], dtype=dtype, device=device)
59
+
60
+
yspace = torch.linspace(y_range[0], y_range[1], h, dtype=dtype, device=device)
61
+
xspace = torch.linspace(x_range[0], x_range[1], w, dtype=dtype, device=device)
62
+
63
+
for x in range(h):
64
+
c[x] += xspace
65
+
66
+
for y in range(w):
67
+
c[:, y] += yspace * 1j
68
+
69
+
#save(c, "c")
70
+
#save(z_init, "z0")
71
+
72
+
#z = z_init
73
+
74
+
totals = torch.zeros([h, w], dtype=torch.int, device=device)
75
+
jitter = torch.randn([h // jitter_chunk_size, w // jitter_chunk_size], dtype=ctype, device=device) * jitter_scale
76
+
if do_jitter:
77
+
upsample = torch.nn.Upsample(size=[h, w], mode='nearest')
78
+
jitter = torch.view_as_real(jitter).permute((2,0,1))[None, ...]
79
+
jitter = torch.view_as_complex(upsample(jitter)[0].permute((1,2,0)).contiguous())
80
+
81
+
#csave(jitter, "jitter")
82
+
#exit()
83
+
84
+
for sample_idx in range(sample_ct):
85
+
noise = torch.randn([h, w], dtype=ctype, device=device)
86
+
z = z_init# + noise * noise_scale
87
+
c_n = c + noise * noise_scale
88
+
mask = torch.abs(z) < limit
89
+
iters = torch.zeros_like(mask, dtype=torch.uint8)
90
+
for i in range(iterations):
91
+
im = torch.abs(z.imag) * j
92
+
z = (torch.abs(z.real) + im)**2 + c_n
93
+
#z = torch.exp(-alpha * z) + c_n
94
+
95
+
if do_jitter:
96
+
z += jitter
97
+
mask &= (torch.abs(z) < limit)
98
+
iters += mask
99
+
#for i in range(iterations):
100
+
# im = torch.abs(z.imag) * j
101
+
# z = (torch.abs(z.real) + im)**2 + c_n
102
+
# totals +=
103
+
104
+
#totals += ~mask
105
+
totals += iters
106
+
#csave(mask, f"mask")
107
+
108
+
#csave(z, f"z{i}")
109
+
110
+
#msave(mask, f"mfinal_s{sample_idx}")
111
+
#csave(z, f"zfinal_s{sample_idx}")
112
+
113
+
#im = torch.log(iters)
114
+
#im = iters
115
+
#im = im / torch.max(im)
116
+
117
+
#msave(im, f"ifinal_s{sample_idx}")
118
+
#im = im * ~mask
119
+
120
+
#totals = torch.log(1. - totals)
121
+
#totals *= totals
122
+
123
+
#m = totals == 0. #torch.max(totals)
124
+
#totals = totals + torch.max(totals) * 0.5 * m
125
+
126
+
totals = totals / sample_ct / iterations#torch.max(totals)
127
+
msave(totals, final_file + "_")
128
+
129
+
#msave(totals**2, final_file + "_sq")
130
+
#msave(torch.sqrt(totals), final_file + "_rt")
131
+
totals = 1. - totals
132
+
#msave(totals, final_file + "_inv_")
133
+
#msave(totals**2, final_file + "_inv_sq")
134
+
msave(torch.sqrt(totals), final_file + "_inv_rt")
135
+
136
+
137
+
#csave(torch.nan_to_num(z, 0) * (mask), "zmfinal")
138
+
139
+
#zm_r = torch.nan_to_num(z.real, 0) * mask
140
+
#zm_i = torch.nan_to_num(z.imag, 0) * mask
141
+
142
+
#print(torch.max(torch.abs(torch.nan_to_num(z, 0) * mask)))
143
+
144
+
#msave(im, "imfinal")
145
+
146
+
#final = torch.stack([zm_r / 4 + 0.5, zm_i / 4 + 0.5, im])
147
+
148
+
#save(final, "final")
149
+
150
+
print(f"done in {time.perf_counter() - t0}s")
151
+
152
+
153
+
154
+
155
+
156
+
+35
sketch/old/pre_snakepyt/ising.py
···
1
+
import time
2
+
from pathlib import Path
3
+
4
+
import torch
5
+
6
+
from util import *
7
+
8
+
@settings
9
+
def _s():
10
+
name = "template"
11
+
device = "cuda"
12
+
13
+
t_real = torch.double
14
+
t_complex = torch.cdouble
15
+
16
+
# 9 -> 512 | 10 -> 1024 | 11 -> 2048
17
+
scale_power = 9
18
+
scale = 2 ** scale_power
19
+
20
+
w, h = scale, scale
21
+
22
+
23
+
@ifmain(__name__, _s)
24
+
@timed
25
+
def _main(settings):
26
+
globals().update(settings.get())
27
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
28
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
29
+
30
+
lattice = torch.randn([h,w], dtype=t_complex, device=device).round(0, 1)
31
+
32
+
with open(f"out/{run_dir}/settings.py", "w") as f:
33
+
f.write(settings.src)
34
+
torch.set_default_device(device)
35
+
+175
sketch/old/pre_snakepyt/lorenz.py
···
1
+
import time
2
+
from pathlib import Path
3
+
4
+
import torch
5
+
6
+
from util import *
7
+
8
+
@settings
9
+
def _s():
10
+
name = "lorenz"
11
+
device = "cuda"
12
+
t_fp = torch.double
13
+
14
+
sigma = 10
15
+
rho = 28
16
+
beta = 8/3
17
+
18
+
tau = 3.14159265358979323 * 2
19
+
20
+
particle_count = 5000
21
+
iterations = 1000
22
+
23
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
24
+
scale_power = 10
25
+
scale = 2 ** scale_power
26
+
27
+
origin = 0, 0
28
+
span = 60, 40
29
+
30
+
zooms = [
31
+
]
32
+
33
+
dt = 0.005
34
+
35
+
36
+
def rk4_step(get_derivative, take_step, position_0):
37
+
direction_0 = get_derivative(position_0)
38
+
position_1 = take_step(position_0, direction_0, 0.5)
39
+
direction_1 = get_derivative(position_1)
40
+
position_2 = take_step(position_0, direction_1, 0.5)
41
+
direction_2 = get_derivative(position_2)
42
+
position_3 = take_step(position_0, direction_2, 1)
43
+
direction_3 = get_derivative(position_3)
44
+
final_direction = (direction_0 + 2 * (direction_1 + direction_2) + direction_3) / 6
45
+
return take_step(position_0, final_direction, 1)
46
+
47
+
def euler_step(get_derivative, take_step, position_0):
48
+
return take_step(position_0, get_derivative(position_0), 1)
49
+
50
+
51
+
def idxput(tensor, indices, values):
52
+
tensor.index_put_((indices[:,0],indices[:,1]), values, accumulate=False)
53
+
54
+
@ifmain(__name__, _s)
55
+
@timed
56
+
def _main(settings):
57
+
globals().update(settings.get())
58
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
59
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
60
+
61
+
global span
62
+
63
+
x_min = origin[0] - (span[0] / 2)
64
+
y_min = origin[1] - (span[1] / 2)
65
+
66
+
for ((xa, xb), (ya, yb)) in zooms:
67
+
x_min += span[0] * xa
68
+
y_min += span[1] * ya
69
+
span = span[0] * (xb - xa), span[1] * (yb - ya)
70
+
71
+
x_max = x_min + span[0]
72
+
y_max = y_min + span[1]
73
+
74
+
aspect = span[0] / span[1]
75
+
76
+
if aspect < 1:
77
+
h = scale
78
+
w = int(scale * aspect)
79
+
else:
80
+
w = scale
81
+
h = int(scale / aspect)
82
+
83
+
84
+
dx = lambda x,y,z: sigma * (y - x)
85
+
dy = lambda x,y,z: x * (rho - z) - y
86
+
dz = lambda x,y,z: x * y - beta * z
87
+
88
+
p_positions = (torch.rand([3, particle_count], device=device, dtype=t_fp) - 0.5) * 40
89
+
p_positions[0] = torch.linspace(-0.1, 0.1, particle_count)
90
+
p_positions[1] = torch.linspace(-0.1, 0.1, particle_count)
91
+
p_positions[2] = torch.linspace(-0.1, 0.1, particle_count)
92
+
93
+
94
+
p_colors = torch.rand([particle_count,3], device=device, dtype=t_fp)
95
+
96
+
color_rotation = torch.linspace(0, tau / 4, particle_count)
97
+
98
+
p_colors[:,0] = torch.cos(color_rotation)
99
+
p_colors[:,1] *= 0
100
+
p_colors[:,2] = torch.sin(color_rotation)
101
+
102
+
canvas = torch.zeros([3, h, w], device=device, dtype=t_fp)
103
+
scratch = torch.zeros([h, w, 3], device=device, dtype=t_fp)
104
+
105
+
step = lambda p, dp, h: p + dp * h * dt
106
+
107
+
def derivative(p):
108
+
dp = torch.zeros_like(p)
109
+
dp[0] = dx(*p)
110
+
dp[1] = dy(*p)
111
+
dp[2] = dz(*p)
112
+
return dp
113
+
114
+
rk4_curried = lambda p: rk4_step(derivative, step, p)
115
+
116
+
def project(p):
117
+
return p[0:2]
118
+
119
+
for iteration in range(iterations):
120
+
p_projected = project(p_positions).clone()
121
+
122
+
if iteration % 500 == 0:
123
+
p_mask = torch.ones([particle_count], device=device)
124
+
p_mask *= (p_projected[1] >= x_min) * (p_projected[1] <= x_max)
125
+
p_mask *= (p_projected[0] >= y_min) * (p_projected[0] <= y_max)
126
+
127
+
in_range = p_mask.nonzero().squeeze()
128
+
129
+
p_projected_filtered = torch.index_select(p_projected.permute(1,0), 0, in_range)
130
+
p_indices = p_projected_filtered
131
+
p_colors_filtered = torch.index_select(p_colors, 0, in_range)
132
+
133
+
p_indices[:,1] -= x_min
134
+
p_indices[:,1] *= (w-1) / (x_max - x_min)
135
+
p_indices[:,0] -= y_min
136
+
p_indices[:,0] *= (h-1) / (y_max - y_min)
137
+
138
+
p_indices = p_indices.long()
139
+
140
+
scratch *= 0
141
+
idxput(scratch, p_indices, p_colors_filtered)
142
+
#canvas += scratch.permute(2,0,1)
143
+
144
+
145
+
#temp = canvas.clone()
146
+
#temp[0] -= temp[0].mean()
147
+
#temp[1] -= temp[1].mean()
148
+
#temp[2] -= temp[2].mean()
149
+
150
+
#temp[0] /= 8 * temp[0].std()
151
+
#temp[1] /= 8 * temp[1].std()
152
+
#temp[2] /= 8 * temp[2].std()
153
+
154
+
#temp += 0.5
155
+
#save(temp, f"{run_dir}/{iteration}")
156
+
save(scratch.permute(2,0,1), f"{run_dir}/{iteration}_b")
157
+
158
+
p_positions = rk4_curried(p_positions)
159
+
160
+
#canvas[0] -= canvas[0].mean()
161
+
#canvas[1] -= canvas[1].mean()
162
+
#canvas[2] -= canvas[2].mean()
163
+
164
+
#canvas[0] /= 8 * canvas[0].std()
165
+
#canvas[1] /= 8 * canvas[1].std()
166
+
#canvas[2] /= 8 * canvas[2].std()
167
+
168
+
#canvas += 0.5
169
+
170
+
#save(canvas, f"{run_dir}/final")
171
+
172
+
with open(f"out/{run_dir}/settings.py", "w") as f:
173
+
f.write(settings.src)
174
+
torch.set_default_device(device)
175
+
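The derivative lambdas in lorenz.py are the Lorenz system, and rk4_step combines four derivative evaluations with the classical Runge-Kutta weights. Written out (a summary of the standard formulas the code follows, with f the derivative and dt the step size from the settings):

\[ \dot x = \sigma (y - x), \qquad \dot y = x(\rho - z) - y, \qquad \dot z = x y - \beta z \]
\[ k_1 = f(p), \quad k_2 = f\!\left(p + \tfrac{dt}{2} k_1\right), \quad k_3 = f\!\left(p + \tfrac{dt}{2} k_2\right), \quad k_4 = f(p + dt\, k_3), \qquad p \leftarrow p + \tfrac{dt}{6}\,(k_1 + 2k_2 + 2k_3 + k_4) \]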
+149
sketch/old/pre_snakepyt/lyapunov.py
+149
sketch/old/pre_snakepyt/lyapunov.py
···
1
+
import torch
2
+
import time
3
+
import itertools
4
+
from random import random
5
+
6
+
from PIL import Image
7
+
# monochrome, 0 to 1
8
+
def mpilify(z):
9
+
z_3d = torch.stack([z, z, z])
10
+
z_norm = z_3d.clamp(0, 1)
11
+
z_np = z_norm.detach().cpu().permute(1, 2, 0).numpy()
12
+
z_bytes = (z_np * 255).round().astype("uint8")
13
+
return Image.fromarray(z_bytes)
14
+
15
+
def msave(x, f):
16
+
mpilify(x).save(f"out/{f}.png")
17
+
18
+
# config
19
+
20
+
21
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
22
+
scale = 2 ** 14
23
+
24
+
origin = 3.80576, 3.8096
25
+
span = 0.01309, 0.00892
26
+
27
+
zooms = [
28
+
]
29
+
30
+
dev = "cuda"
31
+
t_fp = torch.double
32
+
t_cfp = torch.cdouble
33
+
34
+
seq = "BBABABA"
35
+
c = 3.5
36
+
x_init = 0.499
37
+
alpha = 0.907
38
+
39
+
iterations = 4000
40
+
41
+
discontinuity = True
42
+
43
+
final_file = f"{seq}_sq_e14_dis_rngb"
44
+
45
+
# funcs
46
+
47
+
def logistic_map(r, x):
48
+
x_inf_mask = torch.isinf(x)
49
+
_x = r * x * (1 - x)
50
+
return torch.nan_to_num(_x) * ~x_inf_mask - x * x_inf_mask
51
+
52
+
def lyapunov_term(r, x):
53
+
x_inf_mask = torch.isinf(x)
54
+
term = torch.log(torch.abs(r * (1 - 2 * x)))
55
+
return torch.nan_to_num(term) * ~x_inf_mask - x * x_inf_mask
56
+
57
+
def repeat(seq):
58
+
i = 0
59
+
l = len(seq)
60
+
while True:
61
+
yield seq[i%l]
62
+
i += 1
63
+
64
+
def c_avg(prev, val, n):
65
+
"""cumulative average
66
+
prev: previous result (should be zero for n=1)
67
+
val: next value
68
+
n: how many values given so far, including this value"""
69
+
prev_inf_mask = torch.isinf(prev)
70
+
_next = prev + (val - prev) / n
71
+
return torch.nan_to_num(_next) * ~prev_inf_mask + prev * prev_inf_mask
72
+
73
+
# init
74
+
75
+
x_min = origin[0]
76
+
y_min = origin[1]
77
+
78
+
for ((xa, xb), (ya, yb)) in zooms:
79
+
x_min += span[0] * xa
80
+
y_min += span[1] * ya
81
+
span = span[0] * (xb - xa), span[1] * (yb - ya)
82
+
83
+
x_max = x_min + span[0]
84
+
y_max = y_min + span[1]
85
+
86
+
aspect = span[0] / span[1]
87
+
88
+
if aspect < 1:
89
+
h = scale
90
+
w = int(scale * aspect)
91
+
else:
92
+
w = scale
93
+
h = int(scale / aspect)
94
+
95
+
96
+
def main():
97
+
x = torch.ones([h, w], device=dev, dtype=t_fp) * x_init
98
+
ab = torch.zeros([h, w], device=dev, dtype=t_cfp)
99
+
100
+
lyapunov_avg = torch.zeros([h, w], device=dev, dtype=t_fp)
101
+
102
+
yspace = torch.linspace(y_min, y_max, h, dtype=t_fp, device=dev)
103
+
xspace = torch.linspace(x_min, x_max, w, dtype=t_fp, device=dev)
104
+
for _x in range(h):
105
+
ab[_x] += xspace
106
+
for _y in range(w):
107
+
ab[:, _y] += yspace * 1j
108
+
109
+
gen = itertools.islice(repeat(seq), iterations)
110
+
111
+
flip = False
112
+
for idx, seq_elem in enumerate(gen):
113
+
if random() > 0.99:
114
+
flip = not flip
115
+
if seq_elem == "C":
116
+
r = c
117
+
else:
118
+
if flip:
119
+
if seq_elem == "A":
120
+
seq_elem = "B"
121
+
else:
122
+
seq_elem = "A"
123
+
r = ab.real if seq_elem == "A" else ab.imag
124
+
125
+
if idx > 600:
126
+
lyapunov_avg = c_avg(lyapunov_avg, lyapunov_term(r, x), idx)
127
+
128
+
if idx < iterations - 1:
129
+
if discontinuity:
130
+
mask = x <= 0.5
131
+
x = logistic_map(r, x)
132
+
if discontinuity:
133
+
x += mask * (alpha - 1) * (r - 2) / 4
134
+
135
+
#msave(torch.tanh(lyapunov_avg / 4) / 2 + 0.5, f"avg_{idx}")
136
+
137
+
result = torch.tanh(lyapunov_avg) / 2 + 0.5
138
+
msave(result, final_file)
139
+
#msave(1 - result**2, final_file + "sq")
140
+
#msave(1 - torch.sqrt(result), final_file + "sqrt")
141
+
#msave(result ** 2, final_file + "sq_p")
142
+
#msave(torch.sqrt(result), final_file + "sqrt_p")
143
+
144
+
145
+
if __name__ == "__main__":
146
+
t0 = time.perf_counter()
147
+
main()
148
+
print(f"done in {time.perf_counter() - t0}s")
149
+
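lyapunov_term and c_avg together estimate the Lyapunov exponent of the forced logistic map x_{n+1} = r_n x_n (1 - x_n), where r_n is taken from the plane coordinates a and b according to the A/B sequence string. As a summary of what the loop accumulates (iterations below 600 are treated as transient and skipped):

\[ \lambda(a, b) \approx \frac{1}{N} \sum_n \ln\bigl| r_n\,(1 - 2x_n) \bigr|, \qquad r_n \in \{a, b\} \]

c_avg keeps this as a running mean, \( \mu_n = \mu_{n-1} + (v_n - \mu_{n-1})/n \), so no per-iteration history has to be stored.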
+160
sketch/old/pre_snakepyt/raytrace.py
+160
sketch/old/pre_snakepyt/raytrace.py
···
1
+
import torch
2
+
import time
3
+
4
+
from pathlib import Path
5
+
6
+
from util import *
7
+
8
+
@settings
9
+
def _s():
10
+
name = "qjulia_11_40"
11
+
12
+
# 2**10 = 1024
13
+
scale = 2 ** 8
14
+
w = 2 * scale // 3
15
+
h = scale
16
+
17
+
max_steps = 500
18
+
mandel_iters = 40#50
19
+
max_samples = 4
20
+
21
+
noise_scale = 1. / (2 * scale)
22
+
23
+
zoom = 1
24
+
#frame_center = -0.5, -0.5, 0
25
+
frame_center = 0, 0, -1 * zoom
26
+
step_dir = 0, 0, 1
27
+
#frame_span = 1, 1
28
+
frame_span = 2 * zoom, 3 * zoom
29
+
max_depth = 2 * zoom
30
+
31
+
dev = "cuda"
32
+
t_cfp = torch.cfloat
33
+
t_fp = torch.float
34
+
35
+
pi = 3.14159265358979323
36
+
a, b, c = pi / 3, pi + 0.2834298, pi / 4
37
+
38
+
from math import sin, cos
39
+
sina, sinb, sinc = sin(a), sin(b), sin(c)
40
+
cosa, cosb, cosc = cos(a), cos(b), cos(c)
41
+
42
+
rot_x = torch.tensor([
43
+
[1,0,0],
44
+
[0,cosc,-sinc],
45
+
[0,sinc,cosc]], dtype=t_fp, device=dev)
46
+
rot_y = torch.tensor([
47
+
[cosb,0,sinb],
48
+
[0,1,0],
49
+
[-sinb,0,cosb]], dtype=t_fp, device=dev)
50
+
rot_z = torch.tensor([
51
+
[cosa,-sina,0],
52
+
[sina,cosa,0],
53
+
[0,0,1]], dtype=t_fp, device=dev)
54
+
55
+
rot = rot_z @ rot_y @ rot_x
56
+
57
+
quat_last = 0
58
+
quat_c = [-1, 0.2, 0, 0]
59
+
60
+
def hit_mandel(pos):
61
+
z = 0 #pos[2] * (0.2 * pos[2] + 0.5j)
62
+
c = torch.view_as_complex(pos[0:2].permute(1,2,0).contiguous())
63
+
for i in range(mandel_iters):
64
+
z = z**2 + pos[2]*z + c
65
+
return torch.abs(z) < 2
66
+
67
+
def hit_power9_bulb(pos):
68
+
#v = rot @ pos #torch.clone(pos)
69
+
v = (pos.permute(1,2,0) @ rot).permute(2,0,1)
70
+
for i in range(mandel_iters):
71
+
x = v[0]
72
+
y = v[1]
73
+
z = v[2]
74
+
x_term = 1j * torch.sqrt(y**2 + z**2)
75
+
y_term = 1j * torch.sqrt(x**2 + z**2)
76
+
z_term = 1j * torch.sqrt(y**2 + x**2)
77
+
v[0] = (x + x_term)**9 / 2 + (x - x_term)**9 / 2 + pos[0]
78
+
v[1] = (y + y_term)**9 / 2 + (y - y_term)**9 / 2 + pos[1]
79
+
v[2] = (z + z_term)**9 / 2 + (z - z_term)**9 / 2 + pos[2]
80
+
n = torch.norm(v, dim=0)
81
+
return ~(n < 2)
82
+
83
+
def hit_julia_q(pos):
84
+
v = (pos.permute(1,2,0) @ rot).permute(2,0,1)
85
+
v = torch.cat((v, torch.ones([1,h,w], dtype=t_fp, device=dev) * quat_last))
86
+
for i in range(mandel_iters):
87
+
r, a, b, c = v[0], v[1], v[2], v[3]
88
+
_r = r*r - a*a - b*b - c*c
89
+
_a = 2*r*a
90
+
_b = 2*r*b
91
+
_c = 2*r*c
92
+
v[0] = _r + quat_c[0]
93
+
v[1] = _a + quat_c[1]
94
+
v[2] = _b + quat_c[2]
95
+
v[3] = _c + quat_c[3]
96
+
n = torch.norm(v, dim=0)
97
+
return n < 4
98
+
99
+
def hit_ball(pos):
100
+
return torch.norm(pos, dim=0) > 0.9
101
+
102
+
@timed
103
+
@ifmain(__name__, _s)
104
+
def _m(settings):
105
+
globals().update(settings.get())
106
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
107
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
108
+
with open(f"out/{run_dir}/settings.py", "w") as f:
109
+
f.write(settings.src)
110
+
torch.set_default_device(dev)
111
+
112
+
start = torch.zeros([h, w], dtype=t_cfp)
113
+
#res = torch.zeros([h,w], device=dev, dtype=t_fp)
114
+
res = torch.ones([h,w], dtype=t_fp)
115
+
116
+
half_x = frame_span[0] / 2
117
+
half_y = frame_span[1] / 2
118
+
yspace = torch.linspace(-half_y, half_y, h, dtype=t_fp)
119
+
xspace = torch.linspace(-half_x, half_x, w, dtype=t_fp)
120
+
for _x in range(h):
121
+
start[_x] += xspace
122
+
for _y in range(w):
123
+
start[:, _y] += yspace * 1j
124
+
125
+
start = torch.stack((start.real - frame_center[0], start.imag - frame_center[1], torch.ones([h, w], dtype=t_fp) * frame_center[2]))
126
+
127
+
step = torch.stack([torch.ones([h,w], dtype=t_fp)*x for x in step_dir]) * max_depth / max_steps
128
+
129
+
s_res = []
130
+
v_res = []
131
+
for sample_idx in range(max_samples):
132
+
pos = start + torch.randn([3,h,w], dtype=t_fp) * noise_scale
133
+
s_res.append(torch.ones_like(res))
134
+
v_res.append(torch.zeros_like(res))
135
+
for step_idx in range(max_steps):
136
+
pos += step
137
+
hit = hit_julia_q(pos) #* hit_2(pos)
138
+
hit_adj = hit * (s_res[sample_idx] > (step_idx / max_steps))
139
+
hit_val = hit_adj * (step_idx / max_steps)
140
+
#hit_val = hit * (1 / max_steps)
141
+
v_res[sample_idx] = v_res[sample_idx] + hit * (1. / max_steps)
142
+
s_res[sample_idx] = (s_res[sample_idx] * ~hit_adj) + hit_val #hit / max_samples / max_steps
143
+
if step_idx % 10 == 999:
144
+
print(step_idx)
145
+
print(hit.sum())
146
+
print(hit_adj.sum())
147
+
print(s_res[sample_idx].sum())
148
+
149
+
s_res = torch.stack(s_res).mean(dim=0)
150
+
v_res = torch.stack(v_res).mean(dim=0)
151
+
152
+
#res = res % (1/5)
153
+
#res = torch.sqrt(res)# * 5)
154
+
msave(1-s_res, f"{run_dir}/res_s")
155
+
msave(v_res, f"{run_dir}/res_v")
156
+
#msave(1-res, out_file)
157
+
#res_mv = torch.cat([res, torch.flip(res, dims=(0,))])
158
+
#res_mh = torch.cat([res_mv, torch.flip(res_mv, dims=(1,))], dim=1)
159
+
#msave(res_mh, out_file)
160
+
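hit_julia_q iterates q ← q² + c over quaternions; the four update lines are the component form of quaternion squaring for q = (r, a, b, c) with vector part v = (a, b, c):

\[ q^2 = \bigl(r^2 - \lVert v\rVert^2,\; 2rv\bigr) \quad\Longrightarrow\quad r' = r^2 - a^2 - b^2 - c^2,\;\; a' = 2ra,\;\; b' = 2rb,\;\; c' = 2rc \]

quat_c is added after each squaring, and a ray sample counts as a hit while \( \lVert q \rVert < 4 \).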
+37
sketch/old/pre_snakepyt/scratch.py
+37
sketch/old/pre_snakepyt/scratch.py
···
1
+
from util import *
2
+
import torch
3
+
import torchvision
4
+
5
+
@timed
6
+
def a(z, n):
7
+
for i in range(n):
8
+
#msave_gpu(z, "test/f")
9
+
#torchvision.utils.save_image(z, f"out/test/new_{i}.png", "png")
10
+
msave(z, f"test/old_{i}")
11
+
#mpilify(z)
12
+
13
+
@timed
14
+
def b(z, n):
15
+
for i in range(n):
16
+
#torch.save(z.type(torch.uint8), f"out/test/new_{i}.pt")
17
+
msave_alt(z, f"test/new_{i}")
18
+
#mpilify_gpu(z)
19
+
20
+
@ifmain(__name__)
21
+
def _main():
22
+
s = 2048
23
+
h,w = s, s
24
+
25
+
z = torch.randn((h, w), device="cuda", dtype=torch.double);
26
+
27
+
z.mul_(3.14159)
28
+
29
+
n = 20
30
+
31
+
a(z, n)
32
+
33
+
b(z, n)
34
+
35
+
a(z, n)
36
+
37
+
b(z, n)
+128
sketch/old/pre_snakepyt/slime.py
+128
sketch/old/pre_snakepyt/slime.py
···
1
+
import time
2
+
import inspect
3
+
from random import randint
4
+
from pathlib import Path
5
+
6
+
import torch
7
+
from torchvision.transforms import GaussianBlur
8
+
9
+
from util import *
10
+
11
+
@settings
12
+
def _s():
13
+
from math import tau
14
+
15
+
name = "slime"
16
+
17
+
scale = 2 ** 11
18
+
w = scale
19
+
h = scale
20
+
21
+
max_iterations = 1000
22
+
save_mod = 100
23
+
also_save = [10, 50, 150]
24
+
25
+
particle_count = 10000000 // 4
26
+
decay_rate = 0.1
27
+
28
+
view_angle = tau / 5
29
+
view_distance = 1.414 * 140
30
+
31
+
direction_count = 12
32
+
turn_amount = tau / 6#direction_count
33
+
move_distance = 1.414 * 2
34
+
35
+
blur_size = 1
36
+
blur_sigma = 1.5
37
+
38
+
dtype = torch.double
39
+
ctype = torch.cdouble
40
+
device = "cuda"
41
+
42
+
def offset_coord(position, angle, distance):
43
+
res = torch.clone(position)
44
+
res[:,0] += (torch.sin(angle) * distance)
45
+
res[:,1] += (torch.cos(angle) * distance)
46
+
return res
47
+
48
+
def clamp_coords(t, h, w):
49
+
t[:,0] = (t[:,0] + (h-1)) % (h-1)
50
+
t[:,1] = (t[:,1] + (w-1)) % (w-1)
51
+
52
+
def idxput(tensor, indices, value):
53
+
tensor.index_put_((indices[:,0],indices[:,1]), value)
54
+
55
+
@ifmain(__name__, _s)
56
+
@timed
57
+
def _main(settings):
58
+
globals().update(settings.get())
59
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
60
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
61
+
with open(f"out/{run_dir}/settings.py", "w") as f:
62
+
f.write(settings.src)
63
+
torch.set_default_device(device)
64
+
65
+
blur = GaussianBlur(blur_size, blur_sigma)
66
+
67
+
# p_ denotes particle data
68
+
p_positions = torch.rand([particle_count,2])
69
+
p_positions[:,0] *= h
70
+
p_positions[:,1] *= w
71
+
72
+
p_directions = ((torch.rand(particle_count)*direction_count).floor() / direction_count) * 2 * tau
73
+
74
+
world = torch.zeros([3,h,w], dtype=dtype)
75
+
scratch = torch.zeros([3,h,w], dtype=dtype)
76
+
t0 = torch.tensor([0], dtype=dtype)
77
+
t1 = torch.tensor([1], dtype=dtype)
78
+
79
+
for iteration in range(max_iterations):
80
+
81
+
# sense
82
+
angles = (-view_angle, 0, view_angle)
83
+
angles = (p_directions + a for a in angles)
84
+
sensor_coords = (offset_coord(p_positions, a, view_distance) for a in angles)
85
+
sc = [sc for sc in sensor_coords]
86
+
for c in sc:
87
+
clamp_coords(c, h, w)
88
+
89
+
sci = [c.long() for c in sc]
90
+
91
+
senses = [world[0].take(c[:,0] * w + c[:,1]) for c in sci]
92
+
sense_neg = senses[0] > senses[1]
93
+
sense_pos = senses[2] > senses[1]
94
+
#sense_mid = ~sense_neg * ~sense_pos # forward highest
95
+
sense_out = sense_neg * sense_pos # forward lowest
96
+
sense_neg = sense_neg * ~sense_out # negative lowest
97
+
sense_pos = sense_pos * ~sense_out # negative highest
98
+
99
+
# rotate
100
+
#p_directions += torch.rand(particle_count) * sense_out * turn_amount
101
+
mask = torch.rand(particle_count) > 0.5
102
+
p_directions += mask * sense_out * turn_amount
103
+
p_directions -= ~mask * sense_out * turn_amount
104
+
p_directions += sense_pos * turn_amount
105
+
p_directions -= sense_neg * turn_amount
106
+
107
+
# move
108
+
p_positions = offset_coord(p_positions, p_directions, move_distance)
109
+
clamp_coords(p_positions, h, w)
110
+
111
+
# deposit
112
+
idxput(world[0], p_positions.long(), t1)
113
+
114
+
# diffuse
115
+
world = blur(world)
116
+
117
+
# render
118
+
if iteration % save_mod == 0 and iteration != 0:
119
+
msave(world[0], f"{run_dir}/{iteration}")
120
+
121
+
if iteration in also_save:
122
+
msave(world[0], f"{run_dir}/{iteration}")
123
+
124
+
# decay
125
+
if iteration < max_iterations - 1:
126
+
world *= decay_rate
127
+
128
+
msave(world[0], f"{run_dir}/final")
+165
sketch/old/pre_snakepyt/stdmap.py
+165
sketch/old/pre_snakepyt/stdmap.py
···
1
+
# "standard map" aka Chirikov-Taylor map
2
+
3
+
# p_next = p + K * sin(theta)
4
+
# theta_next = theta + p_next
5
+
6
+
import torch
7
+
import time
8
+
import itertools
9
+
from random import random
10
+
11
+
from PIL import Image
12
+
# monochrome, 0 to 1
13
+
def mpilify(z):
14
+
z_3d = torch.stack([z, z, z])
15
+
z_norm = z_3d.clamp(0, 1)
16
+
z_np = z_norm.detach().cpu().permute(1, 2, 0).numpy()
17
+
z_bytes = (z_np * 255).round().astype("uint8")
18
+
return Image.fromarray(z_bytes)
19
+
20
+
def pilify(z):
21
+
z_norm = z.clamp(0, 1)
22
+
z_np = z_norm.detach().cpu().permute(1, 2, 0).numpy()
23
+
z_bytes = (z_np * 255).round().astype("uint8")
24
+
return Image.fromarray(z_bytes)
25
+
26
+
27
+
def msave(x, f):
28
+
mpilify(x).save(f"out/{f}.png")
29
+
30
+
def save(x, f):
31
+
pilify(x).save(f"out/{f}.png")
32
+
33
+
# config
34
+
35
+
tau = 3.14159265358979323 * 2
36
+
37
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
38
+
scale_power = 12
39
+
scale = 2 ** scale_power
40
+
41
+
origin = tau / 2, tau / 2
42
+
span = tau, tau
43
+
44
+
zooms = [
45
+
#((0.333, 0.667), (0.333, 0.667))
46
+
]
47
+
48
+
dev = "cuda"
49
+
t_fp = torch.double
50
+
t_cfp = torch.cdouble
51
+
52
+
k = 0.971635406
53
+
54
+
particle_count = 500000
55
+
56
+
iterations = 1000
57
+
58
+
randomness = 0
59
+
60
+
growth = 0.00001
61
+
62
+
final_file = f"k{k}_i{iterations}_p{particle_count}_s{scale_power}_r{randomness}_b"
63
+
64
+
# funcs
65
+
66
+
def offset_mod(x):
67
+
return torch.remainder(x, tau)
68
+
69
+
def idxput(tensor, indices, values):
70
+
tensor.index_put_((indices[:,0],indices[:,1]), values, accumulate=False)
71
+
72
+
# init
73
+
74
+
x_min = origin[0] - (span[0] / 2)
75
+
y_min = origin[1] - (span[1] / 2)
76
+
77
+
for ((xa, xb), (ya, yb)) in zooms:
78
+
x_min += span[0] * xa
79
+
y_min += span[1] * ya
80
+
span = span[0] * (xb - xa), span[1] * (yb - ya)
81
+
82
+
x_max = x_min + span[0]
83
+
y_max = y_min + span[1]
84
+
85
+
aspect = span[0] / span[1]
86
+
87
+
if aspect < 1:
88
+
h = scale
89
+
w = int(scale * aspect)
90
+
else:
91
+
w = scale
92
+
h = int(scale / aspect)
93
+
94
+
print(f"x {x_min} -> {x_max}")
95
+
print(f"y {y_min} -> {y_max}")
96
+
97
+
def main():
98
+
world = torch.zeros([3, h, w], device=dev, dtype=t_fp)
99
+
scratch = torch.zeros([h, w, 3], device=dev, dtype=t_fp)
100
+
p_positions = torch.rand([particle_count,2], device=dev, dtype=t_fp) * tau
101
+
102
+
p_positions[:,0] = torch.linspace(0, tau, particle_count)
103
+
p_positions[:,1] = torch.linspace(0, tau, particle_count)
104
+
105
+
p_colors = torch.rand([particle_count,3], device=dev, dtype=t_fp)
106
+
107
+
color_rotation = torch.linspace(0, tau / 4, particle_count)
108
+
109
+
p_colors[:,0] = torch.cos(color_rotation)
110
+
p_colors[:,1] *= 0
111
+
p_colors[:,2] = torch.sin(color_rotation)
112
+
113
+
for iteration in range(iterations):
114
+
k_r = k + (random() - 0.5) * randomness + growth * iteration
115
+
p_positions[:,0] = p_positions[:,0] - k_r * torch.sin(p_positions[:,1])
116
+
p_positions[:,1] += p_positions[:,0]
117
+
p_positions[:,0] = offset_mod(p_positions[:,0])
118
+
p_positions[:,1] = offset_mod(p_positions[:,1])
119
+
120
+
if len(zooms) > 0:
121
+
p_mask = torch.ones([particle_count], device=dev)
122
+
p_mask *= (p_positions[:,0] >= x_min) * (p_positions[:,0] <= x_max)
123
+
p_mask *= (p_positions[:,1] >= y_min) * (p_positions[:,1] <= y_max)
124
+
125
+
in_range = p_mask.nonzero().squeeze()
126
+
127
+
p_positions_filtered = torch.index_select(p_positions, 0, in_range)
128
+
p_indices = p_positions_filtered
129
+
p_colors_filtered = torch.index_select(p_colors, 0, in_range)
130
+
else:
131
+
p_positions_filtered = p_positions
132
+
p_indices = p_positions.clone()
133
+
p_colors_filtered = p_colors
134
+
135
+
p_indices[:,0] -= x_min
136
+
p_indices[:,0] *= (w-1) / (x_max - x_min)
137
+
p_indices[:,1] -= y_min
138
+
p_indices[:,1] *= (h-1) / (y_max - y_min)
139
+
140
+
p_indices = p_indices.long()
141
+
142
+
scratch *= 0
143
+
idxput(scratch, p_indices, p_colors_filtered)
144
+
world += scratch.permute(2,0,1)
145
+
146
+
if iteration % 1000 == 0:
147
+
print(f"iteration {iteration}")
148
+
149
+
world[0] -= world[0].mean()
150
+
world[1] -= world[1].mean()
151
+
world[2] -= world[2].mean()
152
+
153
+
world[0] /= 8 * world[0].std()
154
+
world[1] /= 8 * world[1].std()
155
+
world[2] /= 8 * world[2].std()
156
+
157
+
world += 0.5
158
+
159
+
save(world, final_file)
160
+
161
+
if __name__ == "__main__":
162
+
t0 = time.perf_counter()
163
+
main()
164
+
print(f"done in {time.perf_counter() - t0}s")
165
+
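The per-particle update inside the loop is the standard (Chirikov-Taylor) map from the header comment, applied to whole tensors and wrapped into [0, tau) by offset_mod. A minimal scalar sketch of one step (a hypothetical helper, not part of the file; it uses the file's sign convention, which differs from the header comment only by a reflection of theta):

import math

TAU = 2 * math.pi

def standard_map_step(p, theta, k):
    """One Chirikov-Taylor update: kick the momentum, then advance the angle."""
    p_next = (p - k * math.sin(theta)) % TAU
    theta_next = (theta + p_next) % TAU
    return p_next, theta_next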
+28
sketch/old/pre_snakepyt/template.py
+28
sketch/old/pre_snakepyt/template.py
···
1
+
import time
2
+
from pathlib import Path
3
+
4
+
import torch
5
+
6
+
from util import *
7
+
8
+
@settings
9
+
def _s():
10
+
name = "template"
11
+
device = "cuda"
12
+
13
+
t_real = torch.double
14
+
t_complex = torch.cdouble
15
+
16
+
@ifmain(__name__, _s)
17
+
@timed
18
+
def _main(settings):
19
+
globals().update(settings.get())
20
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
21
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
22
+
23
+
# { code }
24
+
25
+
with open(f"out/{run_dir}/settings.py", "w") as f:
26
+
f.write(settings.src)
27
+
torch.set_default_device(device)
28
+
+84
sketch/old/pre_snakepyt/worley.py
+84
sketch/old/pre_snakepyt/worley.py
···
1
+
import time
2
+
from pathlib import Path
3
+
4
+
import torch
5
+
import math
6
+
7
+
from util import *
8
+
9
+
@settings
10
+
def _s():
11
+
name = "worley"
12
+
keep_records = False
13
+
14
+
subcell_size = (16, 16, 16)
15
+
subcell_counts = (8, 8, 8)
16
+
max_feature_points = 9
17
+
feature_point_density = 4
18
+
19
+
device = "cuda"
20
+
dtype = torch.double
21
+
22
+
assert len(subcell_size) == len(subcell_counts), "dimensional consistency"
23
+
24
+
def tuple_mul(a,b):
25
+
return tuple(a*b for a,b in zip(a,b))
26
+
27
+
def factorial(tensor):
28
+
return torch.exp(torch.lgamma(tensor + 1))
29
+
30
+
def prob_n_points(n, density):
31
+
return 1 / (math.pow(density, -n) * math.exp(density) * math.factorial(n))
32
+
33
+
34
+
@ifmain(__name__, _s)
35
+
@timed
36
+
def _main(settings):
37
+
globals().update(settings.get())
38
+
run_dir = ""
39
+
if keep_records:
40
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
41
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
42
+
with open(f"out/{run_dir}/settings.py", "w") as f:
43
+
f.write(settings.src)
44
+
torch.set_default_device(device)
45
+
46
+
dim = len(subcell_size)
47
+
48
+
seed_points_shape = subcell_counts + (max_feature_points, dim)
49
+
50
+
seed_points = torch.rand(seed_points_shape, dtype=dtype).mul_(2).add_(-1)
51
+
52
+
result_shape = tuple_mul(subcell_size, subcell_counts)
53
+
54
+
result = torch.zeros(result_shape, dtype=dtype)
55
+
56
+
rng = torch.rand(subcell_counts)
57
+
mask = torch.zeros(subcell_counts + (max_feature_points,), dtype=torch.bool)
58
+
prob = 0
59
+
for n in range(max_feature_points):
60
+
prob += prob_n_points(n, feature_point_density)
61
+
mask[...,n] = rng > prob
62
+
#infs = ~mask * torch.inf
63
+
#for dim_index in range(dim):
64
+
# seed_points[...,dim_index].mul_(infs)
65
+
66
+
linspaces = [torch.arange(0, subcell_counts[d], 1 / subcell_size[d]) for d in range(dim)]
67
+
grid_coords = torch.meshgrid(linspaces, indexing="ij")
68
+
69
+
grid_indices = grid_coords[0].long().clone()
70
+
for d in range(dim-1):
71
+
grid_indices.add_(grid_coords[d+1].long() * subcell_counts[d])
72
+
73
+
for d in range(dim):
74
+
print(torch.take(seed_points[...,d], grid_indices))
75
+
76
+
#breakpoint()
77
+
78
+
# MxNxO points
79
+
# AxBxC subcells = M//subdiv N//subdiv O//subdiv
80
+
# one point randomly placed in each cell
81
+
# AxBxCx
82
+
# every point is closest to a point in one of its neighbor cells
83
+
# figure out distance to that point
84
+
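The trailing comments describe the intended algorithm: scatter feature points into cells, then value each output sample by its distance to the nearest feature point among the surrounding cells. A minimal, self-contained 2-D version of that idea (a hypothetical sketch with one feature point per cell, rather than the Poisson point counts set up above):

import torch

def worley_2d(cells=8, cell_px=32, device="cpu"):
    # one random feature point per cell, in pixel coordinates
    cell_origin = torch.stack(torch.meshgrid(
        torch.arange(cells, device=device),
        torch.arange(cells, device=device), indexing="ij"), dim=-1)
    feature = (torch.rand(cells, cells, 2, device=device) + cell_origin) * cell_px

    h = w = cells * cell_px
    ys, xs = torch.meshgrid(torch.arange(h, device=device),
                            torch.arange(w, device=device), indexing="ij")
    pix = torch.stack([ys, xs], dim=-1).float()            # [h, w, 2] pixel coords
    cell_of_pix = (pix / cell_px).long()                   # cell index of each pixel
    best = torch.full((h, w), float("inf"), device=device)
    for dy in (-1, 0, 1):                                  # scan the 3x3 cell neighborhood
        for dx in (-1, 0, 1):
            cy = (cell_of_pix[..., 0] + dy).clamp(0, cells - 1)
            cx = (cell_of_pix[..., 1] + dx).clamp(0, cells - 1)
            candidate = feature[cy, cx]                    # [h, w, 2] nearby feature points
            best = torch.minimum(best, (pix - candidate).norm(dim=-1))
    return best / cell_px                                  # distance in cell units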
+149
sketch/old/snakepyt_0_0/autograd/image_approx.py
+149
sketch/old/snakepyt_0_0/autograd/image_approx.py
···
1
+
2
+
import torch
3
+
4
+
from lib.util import load_image_tensor, save, msave
5
+
from lib.spaces import grid, xrange_yrange
6
+
7
+
def init():
8
+
t_real = torch.double
9
+
10
+
torch.set_default_device("cuda")
11
+
torch.set_default_dtype(t_real)
12
+
13
+
im = load_image_tensor("in/flower.jpg").to(t_real).to("cuda")
14
+
15
+
(c, h, w) = im.shape
16
+
17
+
18
+
n_a = 4
19
+
20
+
a = torch.rand([n_a,c,h]) - 0.5 #* 0.1 - 0.05
21
+
b = torch.rand([n_a,c,w]) - 0.5 #* 0.1 - 0.05
22
+
a *= 0.01
23
+
b *= 0.01
24
+
shifts = torch.rand([12])
25
+
coefs = torch.ones([12]) / 12
26
+
27
+
a = gradify(a)
28
+
b = gradify(b)
29
+
coefs = gradify(coefs)
30
+
shifts = gradify(shifts)
31
+
32
+
train_iters = 100000
33
+
def get_lr(i):
34
+
if i < 10:
35
+
return 1
36
+
if i < 100:
37
+
return 1
38
+
if i < 1000:
39
+
return 1e-1
40
+
if i < 10000:
41
+
return 1e-1
42
+
return 1e-1
43
+
44
+
save_on = lambda i: (i % 100 == 0) if i > 100 else (i % 10 == 0)
45
+
46
+
schedule(train, None)
47
+
48
+
def gradify(t):
49
+
return t.clone().detach().requires_grad_(True)
50
+
51
+
def train():
52
+
global a, b, coefs, shifts
53
+
54
+
im_bw = im.norm(p=2,dim=0)
55
+
56
+
g = grid((xrange_yrange((0,0), (7,7)), (h,w))).permute(2,0,1)
57
+
58
+
59
+
for n in range(train_iters):
60
+
copies = a.view(n_a,c,h,1) * b.view(n_a,c,1,w)
61
+
62
+
_g = g * 5
63
+
_g[0] += shifts[0]
64
+
_g[1] += shifts[1]
65
+
s_0 = torch.sin(_g[0]) * copies[0] + torch.cos(_g[0]) * copies[1]
66
+
s_1 = torch.sin(_g[1]) * copies[2] + torch.cos(_g[1]) * copies[3]
67
+
_g = g * 11
68
+
_g[0] += shifts[2]
69
+
_g[1] += shifts[3]
70
+
s_2 = torch.sin(_g[0]) * copies[0] + torch.cos(_g[0]) * copies[2]
71
+
s_3 = torch.sin(_g[1]) * copies[1] + torch.cos(_g[1]) * copies[3]
72
+
_g = g * 23
73
+
_g[0] += shifts[4]
74
+
_g[1] += shifts[5]
75
+
s_4 = torch.sin(_g[0]) * copies[1] + torch.cos(_g[0]) * copies[0]
76
+
s_5 = torch.sin(_g[1]) * copies[3] + torch.cos(_g[1]) * copies[2]
77
+
_g = g * 57
78
+
_g[0] += shifts[6]
79
+
_g[1] += shifts[7]
80
+
s_6 = torch.sin(_g[0]) * copies[0] + torch.cos(_g[0]) * copies[1]
81
+
s_7 = torch.sin(_g[1]) * copies[2] + torch.cos(_g[1]) * copies[3]
82
+
_g = g * 101
83
+
_g[0] += shifts[8]
84
+
_g[1] += shifts[9]
85
+
s_8 = torch.sin(_g[0] + _g[1]) * copies[2] + torch.cos(_g[0] * _g[0] + _g[1] * _g[1]) * copies[3]
86
+
s_9 = torch.sin(_g[1] - _g[0]) * copies[0] + torch.cos(_g[1] * _g[1] + _g[0] * _g[0]) * copies[1]
87
+
88
+
s_10 = (g[0] + shifts[10]) * copies[2] - (g[0] + shifts[10]) * copies[3]
89
+
s_11 = (g[1] + shifts[11]) * copies[0] - (g[1] + shifts[11]) * copies[1]
90
+
91
+
srcs = torch.stack((s_0, s_1, s_2, s_3, s_4, s_5, s_6, s_7, s_8, s_9, s_10, s_11))
92
+
93
+
coefs_ = coefs #+ torch.rand_like(coefs) * 0.1
94
+
95
+
srcs_scaled = (srcs * coefs_.view(12, 1, 1, 1))
96
+
copy = srcs_scaled.sum(dim=0)
97
+
98
+
diff = copy - im
99
+
mse = (diff ** 2).sum()
100
+
101
+
mse.backward()
102
+
103
+
torch.nn.utils.clip_grad_norm_([a,b,coefs], max_norm=1.0)
104
+
with torch.no_grad():
105
+
learn_rate = get_lr(n)
106
+
a -= (a.grad) * learn_rate
107
+
b -= (b.grad) * learn_rate
108
+
coefs -= (coefs.grad) * learn_rate
109
+
shifts -= (shifts.grad) * learn_rate * 0.01
110
+
if save_on(n):
111
+
print(a.grad.norm())
112
+
print(b.grad.norm())
113
+
print(coefs.grad.norm())
114
+
print(shifts.grad.norm())
115
+
116
+
a.grad.zero_()
117
+
b.grad.zero_()
118
+
coefs.grad.zero_()
119
+
120
+
121
+
if save_on(n):
122
+
print(f"mse {mse.item()}")
123
+
with torch.no_grad():
124
+
out = copy - copy.min()
125
+
out /= out.max()
126
+
#print(coefs)
127
+
save(out, f"{run_dir}/{n:06d}")
128
+
#with torch.no_grad():
129
+
# c0 = copies[0] - copies[0].min()
130
+
# c1 = copies[1] - copies[1].min()
131
+
# c2 = copies[2] - copies[2].min()
132
+
# c3 = copies[3] - copies[3].min()
133
+
# c0 /= c0.max()
134
+
# c1 /= c1.max()
135
+
# c2 /= c2.max()
136
+
# c3 /= c3.max()
137
+
print(coefs)
138
+
print(shifts)
139
+
#save(c0, f"{run_dir}/c0_{n:06d}")
140
+
#save(c1, f"{run_dir}/c1_{n:06d}")
141
+
#save(c2, f"{run_dir}/c2_{n:06d}")
142
+
#save(c3, f"{run_dir}/c3_{n:06d}")
143
+
#for s in range(12):
144
+
# with torch.no_grad():
145
+
# src = srcs_scaled[s]
146
+
# src -= src.min()
147
+
# src /= src.max()
148
+
# save(src, f"{run_dir}/s{s}_{n:06d}")
149
+
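The training loop above is gradient descent written out by hand: build the sinusoidal reconstruction from the rank-1 factors a and b, take a squared-error loss against the image, backpropagate, then step each parameter inside no_grad and zero its .grad. In symbols (a summary of the loop, with eta the rate from get_lr; shifts uses a 100x smaller rate, and the gradients of a, b and coefs are clipped to unit norm first):

\[ L = \sum_{c,y,x} \bigl(\hat I_{c,y,x} - I_{c,y,x}\bigr)^2, \qquad \theta \leftarrow \theta - \eta\,\nabla_\theta L \quad\text{for } \theta \in \{a,\; b,\; \text{coefs},\; \text{shifts}\} \]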
+106
sketch/old/snakepyt_0_0/labyrinth_chaos.py
+106
sketch/old/snakepyt_0_0/labyrinth_chaos.py
···
1
+
# Thomas' cyclically symmetric attractor / "Labyrinth Chaos"
2
+
# see "Deterministic chaos seen in terms of feedback circuits: Analysis, synthesis, 'labyrinth chaos'" by Renรฉ Thomas
3
+
4
+
import time
5
+
import math
6
+
from pathlib import Path
7
+
8
+
import torch
9
+
10
+
from lib.spaces import insert_at_coords, map_space
11
+
from lib.ode import rk4_step
12
+
from lib.util import *
13
+
14
+
def init():
15
+
schedule(run, None)
16
+
17
+
name = "labyrinth"
18
+
device = "cuda"
19
+
torch.set_default_device(device)
20
+
t_fp = torch.double
21
+
torch.set_default_dtype(t_fp)
22
+
23
+
dissipation = 0.04 # "b" parameter in the literature
24
+
25
+
tau = 3.14159265358979323 * 2
26
+
27
+
particle_count = 50000
28
+
iterations = 1000
29
+
save_if = lambda i: i > 150 and i % 5 == 0
30
+
rotation_rate = tau / 2000
31
+
32
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
33
+
scale_power = 10
34
+
35
+
scale = 2560#2 ** scale_power
36
+
origin = 0, 0
37
+
s = 2
38
+
span = 16 * s, 9 * s
39
+
zooms = [
40
+
]
41
+
stretch = 1, 1
42
+
43
+
mapping = map_space(origin, span, zooms, stretch, scale)
44
+
(_, (h,w)) = mapping
45
+
46
+
dt = 0.01
47
+
48
+
49
+
def derivative(p):
50
+
return torch.sin(p[[1,2,0]]) - p * dissipation
51
+
52
+
step = lambda p, dp, h: p + dp * h * dt
53
+
rk4_curried = lambda p: rk4_step(derivative, step, p)
54
+
55
+
p_positions = (torch.rand([3, particle_count], device=device, dtype=t_fp) - 0.5) * 20
56
+
57
+
p_colors = torch.rand([particle_count,3], device=device, dtype=t_fp)
58
+
59
+
color_rotation = torch.linspace(0, tau / 4, particle_count)
60
+
61
+
p_colors[:,0] = (p_positions[0,:] / 20) + 0.5
62
+
p_colors[:,1] = (p_positions[1,:] / 20) + 0.5
63
+
p_colors[:,2] = (p_positions[2,:] / 20) + 0.5
64
+
65
+
def project(p, colors, i):
66
+
c = math.cos(i * rotation_rate)
67
+
s = math.sin(i * rotation_rate)
68
+
rotation = torch.tensor([
69
+
[1, 0, 0],
70
+
[0, c,-s],
71
+
[0, s, c]])
72
+
#rotation = torch.tensor([
73
+
# [ c, 0, s],
74
+
# [ 0, 1, 0],
75
+
# [-s, 0, c]])
76
+
#rotation = torch.tensor([
77
+
# [c, -s, 0],
78
+
# [s, c, 0],
79
+
# [0, 0, 1]])
80
+
alt_colors = colors.clone()
81
+
res = (rotation @ p)
82
+
color_filter = res[2].abs() < 0.7
83
+
alt_colors[:,0] *= color_filter
84
+
alt_colors[:,1] *= color_filter
85
+
alt_colors[:,2] *= color_filter
86
+
return (res[0:2], alt_colors)
87
+
88
+
frame_index = [0]
89
+
90
+
def run():
91
+
run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
92
+
Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
93
+
94
+
scratch = torch.zeros([h, w, 3], device=device, dtype=t_fp)
95
+
96
+
for iteration in range(iterations):
97
+
if save_if(iteration) or iteration == iterations - 1:
98
+
(p_projected, alt_colors) = project(p_positions, p_colors, iteration)
99
+
frame_index[0] += 1
100
+
scratch *= 0
101
+
insert_at_coords(p_projected, alt_colors, scratch, mapping)
102
+
#save(scratch.permute(2,0,1), f"{run_dir}/{frame_index[0]:06d}")
103
+
104
+
p_positions.copy_(rk4_curried(p_positions))
105
+
106
+
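derivative(p) is Thomas' cyclically symmetric system in vector form: the index permutation p[[1,2,0]] feeds each coordinate the sine of the next one, and dissipation is the b parameter from the paper cited at the top of the file:

\[ \dot x = \sin y - b\,x, \qquad \dot y = \sin z - b\,y, \qquad \dot z = \sin x - b\,z, \qquad b = 0.04 \]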
+142
sketch/old/snakepyt_0_0/laplace.py
+142
sketch/old/snakepyt_0_0/laplace.py
···
1
+
# laplacian growth
2
+
# see "Fast Simulation of Laplacian Growth" by Theodore Kim, Jason Sewall, Avneesh Sud and Ming C. Lin
3
+
4
+
import itertools
5
+
from math import tau, log
6
+
7
+
import torch
8
+
9
+
from lib.util import load_image_tensor, save, msave
10
+
11
+
12
+
13
+
def init():
14
+
torch.set_default_device("cuda")
15
+
16
+
dims = 2
17
+
#scale = 2 ** 10
18
+
#h = scale
19
+
#w = scale
20
+
margin = 2**4
21
+
22
+
physics_scale = 0.5
23
+
eta = 10.7 # fractal dimension or something
24
+
25
+
max_points = 2 ** 13
26
+
27
+
t_real = torch.half
28
+
29
+
point_positions = torch.zeros([max_points, dims], dtype=t_real)
30
+
point_values = torch.zeros([max_points], dtype=t_real)
31
+
is_charge = torch.zeros([max_points], dtype=torch.bool)
32
+
do_neighbors = torch.zeros([max_points], dtype=torch.bool)
33
+
is_candidate = torch.zeros_like(is_charge)
34
+
is_free = torch.ones_like(is_charge)
35
+
36
+
neighbors = torch.tensor([v for v in itertools.product([-1,0,1], repeat=dims) if any(v)], dtype=t_real)
37
+
38
+
inner_radius = physics_scale / 2
39
+
40
+
def insert_charge(position, value, was_candidate=True, no_neighbors=False):
41
+
if was_candidate: # index could probably be provided by the caller for efficiency
42
+
match_coords = point_positions == position.expand(point_positions.shape)
43
+
match_pos = match_coords.prod(dim=1)
44
+
index = torch.nonzero(is_candidate & match_pos)[0]
45
+
is_candidate[index] = False
46
+
is_charge[index] = True
47
+
do_neighbors[index] = not no_neighbors
48
+
point_values[index] = value
49
+
else:
50
+
# create the charge
51
+
free_index = torch.nonzero(is_free)[0] # maybe not ideal
52
+
is_charge[free_index] = True
53
+
is_free[free_index] = False
54
+
do_neighbors[free_index] = not no_neighbors
55
+
point_positions[free_index] = position
56
+
point_values[free_index] = value
57
+
58
+
# update existing candidate potentials (eqn 11)
59
+
candidate_pos = point_positions[is_candidate]
60
+
dist = (candidate_pos - position.expand(candidate_pos.shape)).norm(p=2, dim=1)
61
+
point_values[is_candidate] += 1 - inner_radius/dist
62
+
63
+
if no_neighbors:
64
+
return
65
+
66
+
# add new candidates
67
+
charge_pos = point_positions[is_charge & do_neighbors]
68
+
for charge_index in range(charge_pos.shape[0]):
69
+
neighborhood = neighbors + charge_pos[charge_index].expand(neighbors.shape)
70
+
for neighbor_index in range(neighborhood.shape[0]):
71
+
maybe_insert_candidate(neighborhood[neighbor_index])
72
+
73
+
def maybe_insert_candidate(position):
74
+
if torch.any((point_positions == position.expand(point_positions.shape)).prod(dim=1)):
75
+
return
76
+
77
+
free_index = torch.nonzero(is_free)[0] # maybe not ideal
78
+
is_candidate[free_index] = True
79
+
is_free[free_index] = False
80
+
point_positions[free_index] = position
81
+
charge_pos = point_positions[is_charge]
82
+
charge_val = point_values[is_charge]
83
+
dist = (charge_pos - position.expand(charge_pos.shape)).norm(p=2, dim=1)
84
+
point_values[free_index] = (1 - inner_radius/dist).sum() # eqn 10
85
+
86
+
def select_candidate(scale):
87
+
try:
88
+
candidate_val = point_values[is_candidate]
89
+
candidate_pos = point_positions[is_candidate]
90
+
max_val = candidate_val.max()
91
+
min_val = candidate_val.min()
92
+
normalized_val = (candidate_val - min_val) / (max_val - min_val) # eqn 13
93
+
val_pow = normalized_val ** eta
94
+
selection_prob = val_pow / val_pow.sum()
95
+
96
+
prob_sum = selection_prob.cumsum(dim=0)
97
+
98
+
choices = []
99
+
for _ in range(scale):
100
+
r = torch.rand(1, dtype=t_real)
101
+
choice = torch.nonzero(prob_sum > r)[0]
102
+
if choice not in choices:
103
+
insert_charge(candidate_pos[choice], 1)
104
+
choices.append(choice)
105
+
except KeyboardInterrupt:
106
+
raise
107
+
except:
108
+
print(f"selection failed")
109
+
110
+
111
+
schedule(run, None)
112
+
113
+
114
+
def run():
115
+
116
+
# set initial conditions
117
+
insert_charge(torch.tensor([0]*dims, dtype=t_real), 1, was_candidate=False)
118
+
for n in range(-40,40):
119
+
insert_charge(torch.tensor([2, n], dtype=t_real), 1, was_candidate=False, no_neighbors=True)
120
+
121
+
schedule(main_loop, None)
122
+
123
+
def main_loop():
124
+
for iteration in range(10000):
125
+
select_candidate(int(log(iteration * 100 + 10)))
126
+
if iteration % 10 == 0:
127
+
final_charges = point_positions[is_charge].clone()
128
+
129
+
for d in range(dims):
130
+
final_charges[:,d] -= final_charges[:,d].min()
131
+
final_charges[:,d] += margin
132
+
133
+
h = int(final_charges[:,0].max().item() + margin)
134
+
w = int(final_charges[:,1].max().item() + margin)
135
+
136
+
indices = final_charges.long()
137
+
138
+
canvas = torch.ones([h,w], dtype=t_real)
139
+
canvas[(indices[:,0], indices[:,1])] = 0
140
+
141
+
msave(canvas, f"{run_dir}/{iteration:06d}")
142
+
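For orientation, the equation numbers in the comments refer to the cited Kim et al. paper. As implemented here, a new candidate site i is assigned the potential of eqn 10, a newly placed charge updates every existing candidate incrementally via eqn 11, and growth sites are drawn using the normalization the code labels eqn 13 together with the eta-weighted probability (R1 is inner_radius):

\[ \phi_i = \sum_{j \in \text{charges}} \left(1 - \frac{R_1}{\lVert r_i - r_j \rVert}\right), \qquad \phi_i \leftarrow \phi_i + 1 - \frac{R_1}{\lVert r_i - r_{\text{new}} \rVert} \]
\[ \Phi_i = \frac{\phi_i - \phi_{\min}}{\phi_{\max} - \phi_{\min}}, \qquad p_i = \frac{\Phi_i^{\,\eta}}{\sum_j \Phi_j^{\,\eta}} \]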
+181
sketch/old/snakepyt_0_0/laplace_b.py
+181
sketch/old/snakepyt_0_0/laplace_b.py
···
1
+
2
+
from random import randint
3
+
from math import cos, sin, tau
4
+
5
+
import torch
6
+
from torch.nn.functional import conv2d, softmax
7
+
8
+
from lib.spaces import xrange_yrange, grid
9
+
from lib.util import msave, save
10
+
11
+
def init():
12
+
device = "cuda"
13
+
torch.set_default_device(device)
14
+
15
+
t_real = torch.double
16
+
torch.set_default_dtype(t_real)
17
+
18
+
19
+
scale = 2**10
20
+
h = scale
21
+
w = scale
22
+
23
+
charge_size = 50
24
+
25
+
iterations = 100001
26
+
selection_range = 0.05
27
+
growth_limit = 100
28
+
29
+
seed_pts = 10
30
+
eta = 5
31
+
32
+
span = (10, 10)
33
+
region = xrange_yrange((0,0), span)
34
+
35
+
neighbor_kernel = torch.ones([1,1,3,3],dtype=t_real)
36
+
neighbor_kernel[0,0,1,1] = 0
37
+
def neighbors(positions):
38
+
conv = conv2d(positions.unsqueeze(0).unsqueeze(0), neighbor_kernel, bias=None, padding="same", stride=[1])[0,0]
39
+
return ((conv > 0) & (positions == 0))
40
+
41
+
42
+
schedule(growth, None)
43
+
44
+
def point_charge(position, positions, potentials, single_charge, no_seed=False, inverse=False, strength=1):
45
+
if not no_seed:
46
+
positions[position[0],position[1]] += 1
47
+
if inverse:
48
+
potentials.add_(torch.roll((1-single_charge)*strength, shifts=position, dims=(0,1)))
49
+
else:
50
+
potentials.add_(torch.roll(single_charge*strength, shifts=position, dims=(0,1)))
51
+
52
+
53
+
def growth():
54
+
potentials = torch.zeros([h,w], dtype=t_real)
55
+
positions = grid((region, (h,w)))
56
+
#positions[:,:,0] -= (2*span[0]) / ((2 * h))
57
+
#positions[:,:,1] -= (2*span[1]) / ((2 * w))
58
+
single_charge_potential = 1 - (charge_size / (positions[:,:,0]**2 + positions[:,:,1]**2)**0.5)
59
+
#msave(single_charge_potential / single_charge_potential.max(), f"{run_dir}/scp")
60
+
single_charge_potential = torch.roll(single_charge_potential, shifts=(h//2, w//2), dims=(0,1))
61
+
62
+
63
+
positions = torch.zeros([h,w], dtype=t_real)
64
+
65
+
#r = torch.rand([seed_pts,2]) * (h) #+ ((h//2))
66
+
#r = r.long()
67
+
#for i in range(seed_pts):
68
+
# point_charge((r[i,0].item(), r[i,1].item()), positions, potentials, single_charge_potential)
69
+
70
+
pts = [
71
+
#(h//2 + 1, w//2),
72
+
#(h//2 - 1, w//2)
73
+
#(h//2,w//2)
74
+
(0,w//2)
75
+
]
76
+
77
+
for p in pts:
78
+
point_charge((p[0], p[1]), positions, potentials, single_charge_potential)
79
+
80
+
schedule(loop, None)
81
+
82
+
83
+
def loop():
84
+
canvas = torch.zeros([3,h,w], dtype=t_real)
85
+
86
+
#U = 80
87
+
#for u in range(U//2):
88
+
#rad = u*h//U
89
+
#n_pts = int(tau*rad)
90
+
#for p in range(n_pts):
91
+
# x = int(cos(tau*p/n_pts) * (rad) + (h//2))
92
+
# y = int(sin(tau*p/n_pts) * (rad) + (h//2))
93
+
# point_charge((x,y), positions, potentials, single_charge_potential, True, False, 5)
94
+
95
+
#rad = int((u+0.5)*(h//U)*10)
96
+
#n_pts = int(tau*rad)
97
+
#for p in range(n_pts):
98
+
# x = int(cos(tau*p/n_pts) * (rad) + (h//4))
99
+
# y = int(sin(tau*p/n_pts) * (rad) + (h//4))
100
+
# point_charge((x,y), positions, potentials, single_charge_potential, True, True, 1)
101
+
102
+
103
+
for p in range(h):
104
+
point_charge((h-1, p), positions, potentials, single_charge_potential, True, False, 2)
105
+
#for p in range(h):
106
+
# point_charge((p, 4 * h//10 - 1), positions, potentials, single_charge_potential, True, False, 8)
107
+
# point_charge((p, 6 * h//10 + 1), positions, potentials, single_charge_potential, True, False, 8)
108
+
109
+
rad = h//10
110
+
n_pts = int(tau*rad)
111
+
for p in range(n_pts):
112
+
x = int(cos(tau*p/n_pts) * rad + h//2 - h//5)
113
+
x2 = int(cos(tau*p/n_pts) * rad + h//2)
114
+
x3 = int(cos(tau*p/n_pts) * rad + h//2 + h//5)
115
+
y = int(sin(tau*p/n_pts) * rad + h//2)
116
+
point_charge((x,y), positions, potentials, single_charge_potential, True, True, 2)
117
+
point_charge((x2,y), positions, potentials, single_charge_potential, True, True, 2)
118
+
point_charge((x3,y), positions, potentials, single_charge_potential, True, True, 2)
119
+
120
+
121
+
for iteration in range(iterations):
122
+
123
+
candidates = neighbors(positions)
124
+
125
+
cand_values = (potentials * candidates)
126
+
cand_values[cand_values==0] = torch.inf
127
+
128
+
cand_values -= cand_values.min() # infs necessary to keep min from being zero
129
+
cand_values = torch.nan_to_num(cand_values, 0, 0, 0) # remove infs
130
+
#msave(cand_values, f"{run_dir}/candv_{iteration}")
131
+
cand_values /= cand_values.max()
132
+
133
+
#msave(cand_values, f"{run_dir}/cand_{iteration:06d}")
134
+
#cand_values *= torch.rand_like(cand_values)
135
+
#cand_probs = softmax(cand_values)
136
+
cand_probs = cand_values
137
+
cand_probs.pow_(eta)
138
+
#cand_probs.mul_(0.9 + 0.1 * torch.rand_like(cand_probs))
139
+
140
+
#selected = torch.multinomial(cand_probs, 3, replacement=False)
141
+
142
+
selected = torch.argwhere((cand_probs > ((1 - selection_range) * cand_probs.max())) & (cand_probs > 0))
143
+
144
+
145
+
print(selected.shape[0])
146
+
if selected.shape[0] > 0:
147
+
r = randint(0,selected.shape[0] - 1)
148
+
selected = torch.roll(selected, shifts=r, dims=0)
149
+
150
+
#print(selected.shape)
151
+
152
+
#ys = (selected // w)
153
+
#xs = (selected % h)
154
+
#point_charge((x,y), positions, potentials, single_charge_potential)
155
+
156
+
if iteration % 100 == 0:
157
+
canvas[2] = potentials
158
+
canvas[2] -= canvas[2].min()
159
+
canvas[2] /= canvas[2].max()
160
+
canvas[2] = 1 - canvas[2]
161
+
#canvas[0] = canvas[0] ** 5
162
+
#/ potentials.max())**10
163
+
#canvas[1] = cand_values
164
+
#canvas[2] = single_charge_potential
165
+
#canvas[0] = positions
166
+
#msave(single_charge_potential, f"{run_dir}/scp_{iteration:06d}")
167
+
#msave(candidates, f"{run_dir}/cand_{iteration}")
168
+
#msave(potentials / potentials.max(), f"{run_dir}/pot_{iteration}")
169
+
#msave(potentials / potentials.max(), f"{run_dir}/{iteration:06d}")
170
+
save(canvas, f"{run_dir}/{iteration:06d}")
171
+
172
+
ys = selected[:,0]
173
+
xs = selected[:,1]
174
+
175
+
n = selected.shape[0]
176
+
if n > growth_limit: n = growth_limit
177
+
178
+
for index in range(n):
179
+
inds = ys[index].item(), xs[index].item()
180
+
point_charge(inds, positions, potentials, single_charge_potential)
181
+
+242
sketch/old/snakepyt_0_0/lyapunov.py
+242
sketch/old/snakepyt_0_0/lyapunov.py
···
1
+
import torch
2
+
import time
3
+
import math
4
+
import itertools
5
+
import sys
6
+
import subprocess
7
+
from random import random
8
+
9
+
from PIL import Image
10
+
11
+
from lib.util import *
12
+
from lib.spaces import map_space, cgrid, center_span, apply_zooms
13
+
from lib.io import VideoWriter
14
+
15
+
zoom_plan = [
16
+
((0.6, 1.0), (0.6, 1.0)),
17
+
((0.7, 0.8), (0.6, 0.7)),
18
+
((0.7, 0.8), (0.8, 0.9)),
19
+
((0.7, 0.8), (0.2, 0.3)),
20
+
((0.5, 0.6), (0.4, 0.5)),
21
+
((0.3, 0.4), (0.6, 0.7)),
22
+
((0.0, 0.1), (0.0, 0.1)),
23
+
((0.4, 0.5), (0.6, 0.7)),
24
+
((0.2, 0.3), (0.7, 0.8)),
25
+
((0.8, 0.9), (0.7, 0.8)),
26
+
((0.1, 0.2), (0.4, 0.5)),
27
+
((0.6, 0.7), (0.7, 0.8)),
28
+
((0.0, 0.1), (0.3, 0.4)),
29
+
#((0.7, 0.8), (0.4, 0.5)),
30
+
#((0.0, 0.1), (0.6, 0.7)),
31
+
#((0.6, 0.7), (0.3, 0.4)),
32
+
#((0.4, 0.5), (0.7, 0.8)),
33
+
#((0.6, 0.7), (0.3, 0.4)),
34
+
#((0.2, 0.3), (0.1, 0.2)),
35
+
#((0.7, 0.8), (0.1, 0.2)),
36
+
#((0.1, 0.2), (0.6, 0.7)),
37
+
#((0.7, 0.8), (0.3, 0.4)),
38
+
#((0.9, 1.0), (0.8, 0.9)),
39
+
]
40
+
41
+
planning_mode = False
42
+
smoothed_path = True
43
+
44
+
def init():
45
+
frame_index = [0]
46
+
47
+
_origin = 0, 0
48
+
_span = 9, 9
49
+
mapping = map_space(_origin, _span, [], (), scale)
50
+
(_, dims) = mapping
51
+
52
+
53
+
#video = VideoWriter(dims, 24,
54
+
# f"out/{run_dir}/vid.mp4",
55
+
# f"out/{run_dir}/ffmpeg_log")
56
+
57
+
if planning_mode:
58
+
zooms = zoom_plan
59
+
schedule(run, None)
60
+
else:
61
+
if smoothed_path:
62
+
frames = 120#720
63
+
schedule(smooth_zoom_interp, range(frames))
64
+
else:
65
+
schedule(zoom_level, range(len(zoom_plan)-1))
66
+
67
+
def final():
68
+
pass
69
+
#video.close()
70
+
71
+
def zoom_level():
72
+
zoom_plan_partial = zoom_plan[:index+2]
73
+
74
+
frames = 10
75
+
76
+
schedule(zoom_interp, range(frames))
77
+
78
+
79
+
80
+
def zoom_interp(index):
81
+
((xl, xr), (yl, yr)) = zoom_plan_partial[-1]
82
+
#t = index / frames
83
+
t = math.log(index + 1, frames+1)
84
+
85
+
zooms = list(zoom_plan_partial)
86
+
zooms[-1] = ((lerp(0, xl, t), lerp(1, xr, t)), (lerp(0, yl, t), lerp(1, yr, t)))
87
+
88
+
89
+
schedule(run, None)
90
+
91
+
def eerp(a,b,t):
92
+
return math.pow(b/a, t)*a
93
+
94
+
def lerp(a,b,t):
95
+
return (1-t) * a + t * b
96
+
97
+
def smooth_zoom_interp(index):
98
+
final_center, final_span = center_span(*apply_zooms(_origin, _span, zoom_plan))
99
+
zooms = []
100
+
101
+
t = (index / frames)#math.log(index + 1, frames+1)
102
+
103
+
span = (eerp(_span[0], final_span[0], t), eerp(_span[1], final_span[1], t))
104
+
105
+
t = (span[0] - _span[0]) / (final_span[0] - _span[0])
106
+
107
+
origin = (lerp(_origin[0], final_center[0], t), lerp(_origin[1], final_center[1], t))
108
+
109
+
schedule(run, None)
110
+
111
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
112
+
scale = 2 ** 11
113
+
114
+
#origin = 3.80576, 3.8096
115
+
#span = 0.01309, 0.00892
116
+
117
+
#origin, span = center_span((3.795, 3.824), (3.80287, 3.82417))
118
+
#origin, span = center_span((3.8097, 3.8175), (3.8138, 3.8181))
119
+
120
+
121
+
stretch = 1, 1
122
+
123
+
124
+
dev = "cuda"
125
+
t_fp = torch.double
126
+
t_cfp = torch.cdouble
127
+
128
+
torch.set_default_device(dev)
129
+
torch.set_default_dtype(t_fp)
130
+
131
+
seq = "BBABABA"
132
+
c = 3.5
133
+
x_init = 0.499
134
+
alpha = 0.907
135
+
136
+
iterations = 1000
137
+
138
+
discontinuity = True
139
+
140
+
flip_mode = "individual"
141
+
flip_chance = 0.00#1
142
+
143
+
# funcs
144
+
145
+
def logistic_map(r, x):
146
+
x_inf_mask = torch.isinf(x)
147
+
_x = r * x * (1 - x)
148
+
return torch.nan_to_num(_x) * ~x_inf_mask - x * x_inf_mask
149
+
150
+
def lyapunov_term(r, x):
151
+
x_inf_mask = torch.isinf(x)
152
+
term = torch.log(torch.abs(r * (1 - 2 * x)))
153
+
return torch.nan_to_num(term) * ~x_inf_mask - x * x_inf_mask
154
+
155
+
def opposite(element):
156
+
if element == "A":
157
+
return "B"
158
+
else:
159
+
return "A"
160
+
161
+
def repeat(seq, flip_chance, flip_mode):
162
+
i = 0
163
+
l = len(seq)
164
+
flipped = False
165
+
while True:
166
+
yield seq[i%l] if not flipped else opposite(seq[i%l])
167
+
i += 1
168
+
if i % l == 0 or flip_mode == "individual":
169
+
flipped = random() < flip_chance
170
+
171
+
def c_avg(prev, val, n):
172
+
"""cumulative average
173
+
prev: previous result (should be zero for n=1)
174
+
val: next value
175
+
n: how many values given so far, including this value"""
176
+
prev_inf_mask = torch.isinf(prev)
177
+
_next = prev + (val - prev) / n
178
+
return torch.nan_to_num(_next) * ~prev_inf_mask + prev * prev_inf_mask
179
+
180
+
181
+
182
+
def run():
183
+
mapping = map_space(origin, span, zooms, stretch, scale)
184
+
(_, (h,w)) = mapping
185
+
186
+
x = torch.ones([h, w]) * x_init
187
+
188
+
ab = cgrid(mapping)
189
+
190
+
tenths = torch.zeros([h, w])
191
+
for i in range(10):
192
+
frac = i / 10
193
+
tenths[math.floor(frac*h), :] = 1
194
+
tenths[:, math.floor(frac*w)] = 1
195
+
196
+
lyapunov_avg = torch.zeros([h, w])
197
+
198
+
gen = itertools.islice(repeat(seq, 0, flip_mode), iterations)
199
+
200
+
flip = False
201
+
for idx, seq_elem in enumerate(gen):
202
+
if seq_elem == "C":
203
+
r = c
204
+
else:
205
+
r_normal = ab.real if seq_elem == "A" else ab.imag
206
+
if flip_chance > 0:
207
+
r_flipped = ab.imag if seq_elem == "A" else ab.real
208
+
flip_mask = torch.rand([h,w]) < flip_chance
209
+
r = r_normal * ~flip_mask + r_flipped * flip_mask
210
+
else:
211
+
r = r_normal
212
+
213
+
if idx > 600:
214
+
lyapunov_avg = c_avg(lyapunov_avg, lyapunov_term(r, x), idx)
215
+
216
+
if idx < iterations - 1:
217
+
if discontinuity:
218
+
mask = x <= 0.5
219
+
x = logistic_map(r, x)
220
+
if discontinuity:
221
+
x += mask * (alpha - 1) * (r - 2) / 4
222
+
223
+
#msave(torch.tanh(lyapunov_avg / 4) / 2 + 0.5, f"avg_{idx}")
224
+
225
+
result = torch.tanh(lyapunov_avg)
226
+
result -= result.mean()
227
+
result /= 5 * result.std()
228
+
result += 0.5
229
+
230
+
if planning_mode:
231
+
result = torch.stack([result + tenths, result, result])
232
+
save(result, f"{run_dir}/{frame_index[0]:06d}")
233
+
else:
234
+
msave(result, f"{run_dir}/{frame_index[0]:06d}")
235
+
#video.mframe(result)
236
+
frame_index[0] += 1
237
+
#msave(1 - result**2, final_file + "sq")
238
+
#msave(1 - torch.sqrt(result), final_file + "sqrt")
239
+
#msave(result ** 2, final_file + "sq_p")
240
+
#msave(torch.sqrt(result), final_file + "sqrt_p")
241
+
242
+
+166
sketch/old/snakepyt_0_0/ode.py
+166
sketch/old/snakepyt_0_0/ode.py
···
1
+
# Thomas' cyclically symmetric attractor / "Labyrinth Chaos"
2
+
# see "Deterministic chaos seen in terms of feedback circuits: Analysis, synthesis, 'labyrinth chaos'" by Renรฉ Thomas
3
+
4
+
import time
5
+
import math
6
+
from pathlib import Path
7
+
8
+
import torch
9
+
10
+
from lib.spaces import insert_at_coords, map_space
11
+
from lib.ode import rk4_step
12
+
from lib.util import *
13
+
14
+
def init():
15
+
schedule(run, None)
16
+
17
+
device = "cuda"
18
+
torch.set_default_device(device)
19
+
t_fp = torch.double
20
+
torch.set_default_dtype(t_fp)
21
+
22
+
dissipation = 0.04 # "b" parameter in the literature
23
+
24
+
tau = 3.14159265358979323 * 2
25
+
26
+
particle_count = 50000
27
+
iterations = 1000
28
+
save_if = lambda i: True #i > 150 and i % 5 == 0
29
+
rotation_rate = tau / 2000
30
+
31
+
# 2 ** 9 = 512; 10 => 1024; 11 => 2048
32
+
scale_power = 10
33
+
34
+
scale = 2 ** scale_power
35
+
origin = 0, 0
36
+
s = 4
37
+
span = s, s
38
+
zooms = [
39
+
]
40
+
stretch = 1, 1
41
+
42
+
mapping = map_space(origin, span, zooms, stretch, scale)
43
+
(_, (h,w)) = mapping
44
+
45
+
dt = 1
46
+
47
+
def proj(u, v):
48
+
dot = (u * v).sum(dim=1)
49
+
scale = (dot / (v.norm(p=2, dim=1) ** 2))
50
+
out = v.clone()
51
+
for dim in range(3):
52
+
out[:,dim] *= scale
53
+
return out
54
+
55
+
def proj_shift(a, b):
56
+
return a + proj(b, a)
57
+
58
+
def get_transformation(direction, flow, ebb, rescaling):
59
+
def transformation(p):
60
+
#return blus(p, ones) - ones
61
+
#if randomize:
62
+
# direction = flow * torch.polar(ones.real, (random() * random_range - random_range / 2) * ones.real)
63
+
if flow == 0:
64
+
result = p - direction * ebb
65
+
else:
66
+
result = proj_shift(p, direction * flow) - direction * ebb
67
+
#res_abs = result.abs()
68
+
if rescaling:
69
+
result /= result.norm(p=2,dim=1).max()
70
+
#result = torch.polar(2 * res_abs / res_abs.max(), result.angle())
71
+
return result
72
+
return transformation
73
+
74
+
def _deriv(b, c):
75
+
def derivative(p):
76
+
return p * ((b * p).sum(dim=0)) - c * b
77
+
return derivative
78
+
79
+
def gpt_deriv(b, k=1.0, c=0.1, omega=0.5):
80
+
"""
81
+
Returns a function deriv(points) -> [3,N] where points are 3D column vectors.
82
+
b: tensor of shape [3] (will be normalized internally)
83
+
k: scalar gain for tanh nonlinearity
84
+
c: scalar drift magnitude along -b
85
+
omega: scalar rotation strength around b
86
+
"""
87
+
b = b / b.norm() # normalize once
88
+
89
+
def deriv(points):
90
+
# <a, b> for each column
91
+
#dots = torch.einsum('ij,i->j', points, b) # shape [N]
92
+
dots = (points * b[:]).sum(dim=0) # shape [N]
93
+
94
+
# radial scaling term
95
+
radial = torch.tanh(k * dots).unsqueeze(0) * points # [3,N]
96
+
97
+
# drift along -b
98
+
drift = -c * b[:].expand_as(points) # [3,N]
99
+
100
+
101
+
# rotation around b: b × a
102
+
rotation = omega * torch.cross(b[:].expand_as(points), points, dim=0)
103
+
104
+
return radial + drift + rotation
105
+
106
+
return deriv
107
+
108
+
p_positions = (torch.rand([3, particle_count], device=device, dtype=t_fp) - 0.5) * 3
109
+
#p_positions[2] = 0
110
+
direction = torch.zeros_like(p_positions) / math.sqrt(3)
111
+
direction[1] += 1
112
+
113
+
#derivative = _deriv(direction, 0.5)
114
+
derivative = gpt_deriv(direction, 1.0, 0.1, 0.5)
115
+
116
+
step = lambda p, dp, h: p + dp * h * dt
117
+
rk4_curried = lambda p: rk4_step(derivative, step, p)
118
+
119
+
120
+
p_colors = torch.rand([particle_count,3], device=device, dtype=t_fp)
121
+
122
+
color_rotation = torch.linspace(0, tau / 4, particle_count)
123
+
124
+
p_colors[:,0] = (p_positions[0,:] / 2) + 0.5
125
+
p_colors[:,1] = (p_positions[1,:] / 2) + 0.5
126
+
p_colors[:,2] = (p_positions[2,:] / 2) + 0.5
127
+
128
+
def project(p, colors, i):
129
+
c = math.cos(i * rotation_rate)
130
+
s = math.sin(i * rotation_rate)
131
+
rotation = torch.tensor([
132
+
[1, 0, 0],
133
+
[0, c,-s],
134
+
[0, s, c]])
135
+
#rotation = torch.tensor([
136
+
# [ c, 0, s],
137
+
# [ 0, 1, 0],
138
+
# [-s, 0, c]])
139
+
#rotation = torch.tensor([
140
+
# [c, -s, 0],
141
+
# [s, c, 0],
142
+
# [0, 0, 1]])
143
+
alt_colors = colors.clone()
144
+
res = (rotation @ p)
145
+
#color_filter = res[2].abs() < 0.7
146
+
#alt_colors[:,0] *= color_filter
147
+
#alt_colors[:,1] *= color_filter
148
+
#alt_colors[:,2] *= color_filter
149
+
return (res[0:2], alt_colors)
150
+
151
+
frame_index = [0]
152
+
153
+
def run():
154
+
scratch = torch.zeros([h, w, 3], device=device, dtype=t_fp)
155
+
156
+
for iteration in range(iterations):
157
+
if save_if(iteration) or iteration == iterations - 1:
158
+
(p_projected, alt_colors) = project(p_positions, p_colors, iteration)
159
+
frame_index[0] += 1
160
+
scratch *= 0
161
+
insert_at_coords(p_projected, alt_colors, scratch, mapping)
162
+
save(scratch.permute(2,0,1), f"{run_dir}/{frame_index[0]:06d}")
163
+
164
+
p_positions.copy_(rk4_curried(p_positions))
165
+
166
+
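proj and gpt_deriv are small pieces of vector algebra: proj(u, v) is the orthogonal projection of u onto v, and the field returned by gpt_deriv combines a tanh-gated radial term, a constant drift along -b, and a rotation about b. With \( \hat b \) the normalized b:

\[ \operatorname{proj}(u, v) = \frac{\langle u, v\rangle}{\lVert v \rVert^2}\, v, \qquad \dot a = \tanh\!\bigl(k\,\langle a, \hat b\rangle\bigr)\, a \;-\; c\,\hat b \;+\; \omega\,(\hat b \times a) \]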
+164
sketch/old/snakepyt_0_0/orbits.py
+164
sketch/old/snakepyt_0_0/orbits.py
···
1
+
2
+
import math
3
+
4
+
import torch
5
+
6
+
from lib.util import save
7
+
8
+
def legendre_p(l, x):
9
+
P = (torch.ones_like(x), x)
10
+
if l < 2:
11
+
return P[l]
12
+
for n in range(2, l+1):
13
+
p1_term = (2*n - 1) * x * P[1]
14
+
p0_term = (n - 1) * P[0]
15
+
Pnext = (p1_term - p0_term) / n
16
+
P = (P[1], Pnext)
17
+
return P[1]
18
+
19
+
def associated_legendre(l, m, x):
20
+
def derivative(f, degree, dx=1e-5):
21
+
for iteration in range(degree):
22
+
f = lambda x, f=f, dx=dx: (f(x+dx) - f(x-dx)) / (2 * dx)
23
+
return f
24
+
25
+
p = lambda x: legendre_p(l, x)
26
+
dp = derivative(p, m)
27
+
return (-1)**m * (1 - x**2)**(m / 2) * dp(x)
28
+
29
+
def spherical_harmonic(l, m, theta, phi):
30
+
x = torch.cos(theta)
31
+
p = associated_legendre(l, abs(m), x)
32
+
if m == 0:
33
+
return p * (1 + 0j)
34
+
return p * torch.exp(1j * m * phi)
35
+
36
+
def generalized_laguerre(k, a, x):
37
+
total = torch.zeros_like(x)
38
+
for i in range(k + 1):
39
+
numer = math.factorial(k + a)
40
+
denom = math.factorial(k - i) * math.factorial(a + i) * math.factorial(i)
41
+
total += ((-x)**i) * (numer / denom)
42
+
return total
43
+
44
+
def radial_wavefunc(n, l, r):
45
+
rho = 2 * r / n
46
+
laguerre = generalized_laguerre(n - l - 1, 2 * l + 1, rho)
47
+
return r**l * torch.exp(-rho / 2) * laguerre
48
+
49
+
def orbital_wavefunc(n, l, m, r, theta, phi):
50
+
R = radial_wavefunc(n, l, r)
51
+
Y = spherical_harmonic(l, m, theta, phi)
52
+
return R * Y
53
+
54
+
def sphericalize(coords):
55
+
r = coords.norm(p=2,dim=0)
56
+
theta = torch.acos(coords[2]/r)#torch.clamp(coords[2] / r, -1.0, 0.0))
57
+
phi = torch.atan2(coords[1], coords[0])
58
+
return r, theta, phi
59
+
60
+
61
+
def init():
62
+
torch.set_default_device("cuda")
63
+
64
+
h = 2**9
65
+
w = 2**9
66
+
d = 2**9
67
+
68
+
scale = 20#0
69
+
70
+
rx = -scale, scale
71
+
ry = -scale, scale
72
+
rz = -scale, scale
73
+
74
+
coords = torch.zeros([3, h, w, d])
75
+
76
+
xspace = torch.linspace(rx[0], rx[1], w, dtype=torch.double)
77
+
yspace = torch.linspace(ry[0], ry[1], h, dtype=torch.double)
78
+
zspace = torch.linspace(rz[0], rz[1], d, dtype=torch.double)
79
+
80
+
coords[0] = xspace.view([w,1,1]).expand([w,h,d]).permute(1,0,2)
81
+
coords[1] = yspace.view([h,1,1]).expand([h,w,d])
82
+
coords[2] = zspace.view([d,1,1]).expand([d,h,w]).permute(1,2,0)
83
+
84
+
schedule(per_n, [9])
85
+
#schedule(per_n, range(1,6))
86
+
87
+
def per_n(n):
88
+
schedule(per_l, range(n))
89
+
#schedule(per_l, [3])
90
+
#schedule(per_l, range(-5,6))
91
+
92
+
def per_l(l):
93
+
schedule(per_m, range(l+1))
94
+
#schedule(per_m, range(-l,l+1))
95
+
#schedule(per_m, [3,2])
96
+
#schedule(per_m, range(-5,6))
97
+
98
+
def per_m(m):
99
+
#schedule(per_frame, range(60*4))
100
+
schedule(per_frame, [0])
101
+
102
+
def per_frame(frame_index):
103
+
torch.cuda.empty_cache()
104
+
105
+
rotation_rate = 1 * math.tau / 60
106
+
rotation_rate_b = 1 * math.tau / 60
107
+
rotation_rate_c = 1 * math.tau / 60
108
+
109
+
rotation_rate = 0 if frame_index > 60 and frame_index < 180 else rotation_rate
110
+
rotation_rate_b = 0 if frame_index < 60 or frame_index > 120 else rotation_rate_b
111
+
rotation_rate_c = 0 if frame_index < 120 else rotation_rate_c
112
+
113
+
c = math.cos(frame_index * rotation_rate)
114
+
s = math.sin(frame_index * rotation_rate)
115
+
rotation = torch.tensor([
116
+
[1, 0, 0],
117
+
[0, c,-s],
118
+
[0, s, c]])
119
+
c = math.cos(frame_index * rotation_rate_b)
120
+
s = math.sin(frame_index * rotation_rate_b)
121
+
rotation_b = torch.tensor([
122
+
[ c, 0, s],
123
+
[ 0, 1, 0],
124
+
[-s, 0, c]])
125
+
c = math.cos(frame_index * rotation_rate_c)
126
+
s = math.sin(frame_index * rotation_rate_c)
127
+
rotation_c = torch.tensor([
128
+
[c, -s, 0],
129
+
[s, c, 0],
130
+
[0, 0, 1]])
131
+
132
+
133
+
_coords = (coords.permute(1,2,3,0) @ (rotation_c @ rotation_b @ rotation).T).permute(3,0,1,2)
134
+
135
+
_coords *= n
136
+
137
+
r, theta, phi = sphericalize(_coords)
138
+
res = orbital_wavefunc(n, l, m, r, theta, phi)
139
+
for dim in [0]:
140
+
141
+
if m > 0:
142
+
res_c = res.real.abs() * res.imag
143
+
res_pos = res_c.clamp(0,torch.inf).sum(dim=dim)
144
+
res_pos /= res_pos.abs().max()
145
+
res_neg = (-res_c).clamp(0,torch.inf).sum(dim=dim)
146
+
res_neg /= res_neg.abs().max()
147
+
148
+
res_r = res_pos#.pow(0.3)
149
+
res_b = res_neg#.pow(0.3)
150
+
151
+
else:
152
+
res_r = res.real.clamp(0,torch.inf).sum(dim=dim)
153
+
res_b = (-res.real).clamp(0,torch.inf).sum(dim=dim)
154
+
155
+
res_r /= res_r.max()
156
+
res_b /= res_b.max()
157
+
158
+
res_g = torch.zeros_like(res_r)
159
+
160
+
res_rgb = torch.stack((res_r, res_g, res_b))
161
+
162
+
save(res_rgb, f"{run_dir}/{n}_{l}_{m}_view{dim}_{frame_index:06d}")
163
+
del r, theta, phi, res, res_r, res_g, res_b
164
+
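note (not part of the diff): legendre_p in orbits.py is just Bonnet's recurrence, n·P_n(x) = (2n-1)·x·P_{n-1}(x) - (n-1)·P_{n-2}(x). a tiny standalone sketch of the same scheme, checked against the closed forms P_2(x) = (3x²-1)/2 and P_3(x) = (5x³-3x)/2 — my own sanity check, not code from the repo:

# standalone check of the Bonnet recurrence used by legendre_p
import torch

def legendre_p_ref(l, x):
    P = (torch.ones_like(x), x)
    if l < 2:
        return P[l]
    for n in range(2, l + 1):
        P = (P[1], ((2 * n - 1) * x * P[1] - (n - 1) * P[0]) / n)
    return P[1]

x = torch.linspace(-1, 1, 5, dtype=torch.double)
assert torch.allclose(legendre_p_ref(2, x), (3 * x**2 - 1) / 2)
assert torch.allclose(legendre_p_ref(3, x), (5 * x**3 - 3 * x) / 2)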
+184  sketch/old/snakepyt_0_0/physarum.py

import time
from random import randint
from pathlib import Path
from math import tau
import math

import torch
from torchvision.transforms import GaussianBlur

from lib.util import *
from lib.io import VideoWriter

def init():
    scale = 2 ** 12
    w = scale
    h = scale

    max_iterations = 2000
    save_mod = 100
    also_save = []

    particle_count = int((w * h) * 3)
    decay_rate = 0.03

    view_angle = tau / 10
    view_distance = 1.414 * 10

    direction_count = 40000
    turn_amount = tau / 10#direction_count
    move_distance = 1.414 * 1

    blur_size = 3
    blur_sigma = 1.5

    r_strength = 0.8
    b_strength = 50
    g_strength = 0.03

    dtype = torch.double
    ctype = torch.cdouble
    device = "cuda"
    torch.set_default_device(device)
    torch.set_default_dtype(dtype)

    video = VideoWriter((h,w), 60,
        f"out/{run_dir}/vid.mp4",
        f"out/{run_dir}/ffmpeg_log")

    schedule(run, None)

def final():
    video.close()

def offset_coord(position, angle, distance):
    res = torch.clone(position)
    res[:,0] += (torch.sin(angle) * distance)
    res[:,1] += (torch.cos(angle) * distance)
    return res

def clamp_coords(t, h, w):
    t[:,0] = (t[:,0] + (h-1)) % (h-1)
    t[:,1] = (t[:,1] + (w-1)) % (w-1)

def idxput(tensor, indices, value):
    #tensor.index_put_((indices[:,0],indices[:,1]), value)
    tensor[:,indices[:,0],indices[:,1]] += value

def run():

    blur = GaussianBlur(blur_size, blur_sigma)

    # p_ denotes particle data
    p_positions = torch.rand([particle_count,2]) - 0.5
    p_positions[:,0] *= h / 10
    p_positions[:,1] *= w / 10
    p_positions[:,0] += h / 10
    p_positions[:,1] += w / 2

    #stimulus_positions_r = torch.tensor([
    #    [h//3,w//3],
    #], dtype=torch.long)

    stimulus_positions_r = torch.rand([1000,2]) - 0.5
    stimulus_positions_r[:,0] *= h #* 0.9
    stimulus_positions_r[:,1] *= w #* 0.55
    #stimulus_positions_r[0:100,1] *= 0.02
    #stimulus_positions_r[0:100,0] *= 1 / 0.9
    stimulus_positions_r[:,0] += h / 2
    stimulus_positions_r[:,1] += w / 2

    #stimulus_positions_b = torch.tensor([
    #    [h//3,2*w//3],
    #    [2*h//3,w//3],
    #], dtype=torch.long)

    stimulus_positions_b = torch.rand([100,2]) - 0.5
    stimulus_positions_b[:,0] *= h * 0.6
    stimulus_positions_b[:,1] *= w #* 0.5
    stimulus_positions_b[:,0] += h / 2
    stimulus_positions_b[:,1] += w / 2

    p_directions = ((torch.rand(particle_count)*direction_count).floor() / direction_count) * tau

    world = torch.zeros([3,h,w], dtype=dtype)
    scratch = torch.zeros([3,h,w], dtype=dtype)
    black = torch.tensor([0, 0, 0], dtype=dtype)[:,None]
    white = torch.tensor([1, 1, 1], dtype=dtype)[:,None]
    red = torch.tensor([1, 0, 0], dtype=dtype)[:, None]
    green = torch.tensor([0, 1, 0], dtype=dtype)[:, None]
    blue = torch.tensor([0, 0, 1], dtype=dtype)[:, None]

    for iteration in range(max_iterations):

        # sense
        angles = (-view_angle, 0, view_angle)
        angles = (p_directions + a for a in angles)
        sensor_coords = (offset_coord(p_positions, a, view_distance) for a in angles)
        sc = [sc for sc in sensor_coords]
        for c in sc:
            clamp_coords(c, h, w)

        sci = [c.long() for c in sc]

        #senses = [world[0].take(c[:,0] * w + c[:,1]) for c in sci]
        senses = [world[:,c[:,0], c[:,1]] for c in sci]

        senses = [30 * s[2] + s[1] - (500 * s[0]) for s in senses]

        sense_neg = senses[0] > senses[1]
        sense_pos = senses[2] > senses[1]
        #sense_mid = ~sense_neg * ~sense_pos # forward highest
        sense_out = sense_neg * sense_pos # forward lowest
        sense_neg = sense_neg * ~sense_out # negative lowest
        sense_pos = sense_pos * ~sense_out # negative highest

        #sense_out = sense_out[1] & ~sense_out[0]
        #sense_pos = sense_pos[1] & ~sense_neg[0]
        #sense_neg = sense_neg[1] & ~sense_pos[0]

        # rotate
        #p_directions += torch.rand(particle_count) * sense_out * turn_amount
        mask = torch.rand(particle_count) > 0.5
        p_directions += mask * sense_out * turn_amount
        p_directions -= ~mask * sense_out * turn_amount
        p_directions += sense_pos * turn_amount
        p_directions -= sense_neg * turn_amount

        p_directions = ((p_directions * (direction_count / tau)).floor() / direction_count) * tau

        # move
        p_positions = offset_coord(p_positions, p_directions, move_distance)
        clamp_coords(p_positions, h, w)

        # deposit
        idxput(world, p_positions.long(), green * g_strength)

        idxput(world, stimulus_positions_r.long(), red * r_strength)
        idxput(world, stimulus_positions_b.long(), blue * b_strength)

        # diffuse
        world = blur(world)
        world[2][None] = blur(world[2][None])

        crowding_mask = world[1] > 0.9
        world[0] += 0.1 * crowding_mask
        loneliness_mask = world[1] < 0.01
        world[0] += 0.005 * loneliness_mask

        world[2] *= (1 - world[1]).clamp(0,1) ** 0.4

        # render
        video.frame(world)

        if iteration % save_mod == 0 and iteration != 0:
            save(world, f"{run_dir}/{iteration:06d}")

        if iteration in also_save:
            save(world, f"{run_dir}/{iteration:06d}")

        # decay
        if iteration < max_iterations - 1:
            world *= (1-decay_rate)

    save(world, f"{run_dir}/final")

+117  sketch/old/snakepyt_0_0/pix_sort.py

import torch
from torch.nn.functional import conv2d
from math import tau

from lib.util import load_image_tensor, save, msave
from diffusers import AutoencoderKL

t_fp = torch.double

def persistent():
    pass
    #vae = AutoencoderKL.from_pretrained(
    #    "/ssd/0/ml_models/ql/hf-diff/stable-diffusion-xl-base-0.9", subfolder="vae"
    #)
    #vae.to("cuda")
    #vae.to(torch.float)

def decode(l, vae):
    l = l.to(vae.dtype)
    with torch.no_grad():
        ims = vae.decode(l).sample
    return ims

def encode(im, vae):
    im = im.to(vae.dtype)
    with torch.no_grad():
        l = vae.encode(im)
    return l

def init():
    torch.set_default_device("cuda")
    torch.set_default_dtype(t_fp)

    im = load_image_tensor("in/sunset.jpg").to(t_fp).to("cuda")
    #im = encode(im.unsqueeze(0), vae)[0].mean[0].to(t_fp)

    #im = im.transpose(1,2)

    threshold = 0.4

    min_strip_size = 2
    max_strip_size = 400000

    # 1 = vertical, 2 = horizontal
    sort_dim = 1

    ranges = [(0.15, 0.33), (0.44, 0.9)]
    def in_ranges(x):
        for r in ranges:
            if x > r[0] and x < r[1]:
                return True
        return False

    column_selection = lambda w: [x for x in range(w) if in_ranges(x/w)]


    schedule(run, None)

def convolve(field, kernel):
    return conv2d(field.unsqueeze(0), kernel, bias=None, padding=[1, 0], stride=[1])[0]

kernel3 = torch.tensor([
    [[[0], [-1], [1]], [[0], [0], [0]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [-1], [1]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [0], [0]], [[0], [-1], [1]]],
], dtype=t_fp, device="cuda")

kernel4 = torch.tensor([
    [[[0], [-1], [1]], [[0], [0], [0]], [[0], [0], [0]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [-1], [1]], [[0], [0], [0]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [0], [0]], [[0], [-1], [1]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [0], [0]], [[0], [0], [0]], [[0], [-1], [1]]],
], dtype=t_fp, device="cuda")


def run():
    (_, h, w) = im.shape

    out = im.clone() #torch.zeros_like(im)
    group_masks = torch.zeros([h,w], dtype=torch.long)

    edges = convolve(im, kernel3)
    edges = (edges.norm(p=2, dim=0) > threshold)

    scanner_encounters = torch.zeros([w], dtype=torch.long)

    n_edges = edges.sum(dim=0)
    print(n_edges)

    for row_index in range(h):
        scanner_encounters += edges[row_index]

        group_masks[row_index] = scanner_encounters

    n_groups = group_masks.max() + 1

    for group_index in range(n_groups):
        group_mask = group_masks == group_index

        for column in column_selection(w):
            inds = torch.where(group_mask[:, column])[0]
            if inds.shape[0] < min_strip_size or inds.shape[0] > max_strip_size:
                continue
            i_min, i_max = inds.min(), inds.max()
            strip = im[:, i_min:i_max, column]
            strip_vals = strip.norm(p=2, dim=0)
            s_vals, s_inds = torch.sort(strip_vals)
            out[:, i_min:i_max, column] = strip[:, s_inds]

    #out = 0.8 * out + 0.2 * im
    #out = out.transpose(1,2)
    #out = decode(out.unsqueeze(0), vae)[0]
    save(out, f"{run_dir}/out")
    #save(out, f"{run_dir}/out")

+78  sketch/old/snakepyt_0_0/plume/lines.py

from math import tau

import torch

from lib.spaces import map_space, draw_points_2d, dotted_lines_2d, center_span, grid
from lib.util import save

def init():
    t_real = torch.double
    torch.set_default_device("cuda")

    scale = 2 ** 12

    #r = 1

    #c,s = center_span((-r,r),(-r,r))
    c = (0, -0.5)
    s = (1,1)

    mapping = map_space(c, s, [], (1,1), scale)
    _, (h,w) = mapping

    canvas = torch.zeros([3,h,w], dtype=t_real)

    schedule(test0, None)


def test0():
    global canvas
    inner_scale = 50
    points = torch.rand([2, 2, inner_scale ** 2], dtype=t_real)
    colors = torch.ones([3], dtype=t_real)
    r = colors.clone()
    r[1] = 0
    r[2] = 0

    points[0] = grid(map_space(c, s, [], (1,1), inner_scale)).permute(2,0,1).view(2, inner_scale**2)
    points[1] = points[0]
    points[1,0] += s[1] / inner_scale

    #p = points[0].clone()

    #draw_points_2d(points[0], r, canvas, mapping)
    #save(canvas, f"{run_dir}/test")

    _points = points.clone()

    dotted_lines_2d(_points, colors, 100, canvas, mapping)
    #draw_points_2d(_points[0], r, canvas, mapping)
    save(canvas, f"{run_dir}/{0:06d}")

    for n in range(24 * 6):
        p = _points

        mult = 1 + (p[:,0]) / (p[:,0] * p[:,0] + p[:,1] * p[:,1])
        points[:,0] = p[:,0] * mult - 0.804
        points[:,1] = p[:,1] * mult
        #p = points[1]

        _points[0] = points[0]
        _points[1] = points[1]

        #_points[1] /= _points[1].norm(p=2,dim=0)
        #_points[1] /= inner_scale / s[0]
        #points[1] *= 10
        #_points[1] += _points[0]
        canvas *= 0
        dotted_lines_2d(_points, colors, 100, canvas, mapping)
        #draw_points_2d(_points[0], r, canvas, mapping)
        save(canvas, f"{run_dir}/{n+1:06d}")


    #dotted_lines_2d(points, colors, 30, canvas, mapping)
    #draw_points_2d(points[0], r, canvas, mapping)

    #save(canvas, f"{run_dir}/canvas")
+136  sketch/old/snakepyt_0_0/plume/plume.py

import math
import time
from pathlib import Path
from random import random

import torch

from lib.util import *
from lib.spaces import insert_at_coords, map_space

def init():
    schedule(_settings_0, range(30))

name = "blus"

device = "cuda"
torch.set_default_device(device)

t_real = torch.double
t_complex = torch.cdouble

tau = 3.14159265358979323 * 2

randomize = False
random_range = tau #/ 50
show_gridlines = False

particle_count = 100000


# 2 ** 9 = 512; 10 => 1024; 11 => 2048
scale_power = 10
scale = 2 ** scale_power

origin = 0, 0
span = 5, 5

stretch = 1, 1

zooms = [
]


p_positions = (torch.rand([particle_count], device=device, dtype=t_complex) - (0.5 + 0.5j)) * 4
p_colors = torch.ones([particle_count,3], device=device, dtype=t_real)
color_rotation = torch.linspace(0, tau / 4, particle_count)

p_colors[:,0] = torch.frac((p_positions.real / 4 + 0.5) * 10)
p_colors[:,2] = torch.frac((p_positions.imag / 4 + 0.5) * 10)
p_colors[:,1] = (1 - (p_colors[:,0] + p_colors[:,2])).clamp(0,1)

ones = torch.ones_like(p_positions)

def blus(a, b):
    theta_a = a.angle()
    return torch.polar(a.abs() + b.abs() * torch.cos(b.angle() - theta_a), theta_a)

def get_transformation(direction, flow, ebb, rescaling):
    def transformation(p):
        #return blus(p, ones) - ones
        #if randomize:
        #    direction = flow * torch.polar(ones.real, (random() * random_range - random_range / 2) * ones.real)
        result = blus(p, direction * flow) - direction * ebb
        res_abs = result.abs()
        if rescaling:
            result = torch.polar(2 * res_abs / res_abs.max(), result.angle())
        return result
    return transformation


def _settings_0(seed):
    if seed % 2 == 0:
        ebb = 0
        iterations = 1
        flow = 2 / iterations
        rescaling = False
    elif seed % 2 == 1:
        flow = 0
        iterations = 1
        ebb = 0.333 / iterations
        rescaling = False
    elif False:#seed % 3 == 2:
        flow = 0
        ebb = 0
        iterations = 10
        rescaling = True
    angle = 0 #if seed < 2 else tau / 4
    direction = torch.polar(ones.real, 1 * tau * ones.real)
    next_positions = get_transformation(direction, flow, ebb, rescaling)
    schedule(run, None)

def settings_1(idx):
    if idx % 2 == 0:
        iterations = 60
        ebb = 0
        flow = 1 / iterations
    else:
        iterations = 30
        ebb = 1 / iterations
        flow = 0
    rescaling = False
    angle = (idx // 2) * (tau / 4)
    direction = torch.polar(ones.real, angle * ones.real)
    next_positions = get_transformation(direction, flow, ebb, rescaling)
    schedule(run, None)




frame_index = [0]

mapping = map_space(origin, span, zooms, stretch, scale)
(_, (h,w)) = mapping

def run():
    scratch = torch.zeros([h, w, 3], device=device, dtype=t_real)

    def project(p):
        return torch.view_as_real(p).permute(1,0)

    for iteration in range(iterations):
        frame_index[0] += 1
        p_projected = project(p_positions).clone()

        if iteration % 1 == 0:
            scratch *= 0
            insert_at_coords(p_projected, p_colors, scratch, mapping)
            if show_gridlines:
                scratch[:,:,1] += gridlines
                scratch[:,:,2] += gridlines
            save(scratch.permute(2,0,1).sqrt(), f"{run_dir}/{frame_index[0]:06d}")

        p_positions.copy_(next_positions(p_positions))

+168  sketch/old/snakepyt_0_0/plume/plume3d.py

import math
import time
from pathlib import Path
from random import random

import torch

from lib.util import *
from lib.spaces import insert_at_coords, map_space

def init():
    schedule(_settings_0, range(10000))

name = "plume3"

device = "cuda"
torch.set_default_device(device)

t_real = torch.double
t_complex = torch.cdouble

tau = 3.14159265358979323 * 2

randomize = False
random_range = tau #/ 50

particle_count = 1000000


# 2 ** 9 = 512; 10 => 1024; 11 => 2048
scale_power = 10
scale = 2 ** scale_power

origin = 0, 0
span = 2, 2

stretch = 1, 1

zooms = [
]


p_positions = (torch.rand([particle_count,3], device=device, dtype=t_real) - (0.5))
#p_positions[:,0] -= 0.5
#p_positions[:,1] -= 0.5
#p_positions[:,2] = 0# -= 0.5
p_positions *= 1
p_colors = torch.ones([particle_count,3], device=device, dtype=t_real)
color_rotation = torch.linspace(0, tau / 4, particle_count)

p_colors[:,0] = torch.frac((p_positions[:,0] / 4 + 0.5) * 10)
#p_colors[:,1] = torch.frac((p_positions[:,1] / 2 + 0.5) * 10)
p_colors[:,2] = torch.frac((p_positions[:,1] / 4 + 0.5) * 10)
p_colors[:,1] = (1 - (p_colors[:,0] + p_colors[:,2])).clamp(0,1)

ones = torch.ones_like(p_positions)

def proj(u, v):
    dot = (u * v).sum(dim=1)
    scale = (dot / (v.norm(p=2, dim=1) ** 2))
    out = v.clone()
    # todo: find better way
    for dim in range(3):
        out[:,dim] *= scale
    return out

def proj_shift(a, b):
    return a + proj(b, a)

def get_transformation(direction, flow, ebb, rescaling):
    def transformation(p):
        #return blus(p, ones) - ones
        #if randomize:
        #    direction = flow * torch.polar(ones.real, (random() * random_range - random_range / 2) * ones.real)
        if flow == 0:
            result = p - direction * ebb
        else:
            result = proj_shift(p, direction * flow) - direction * ebb
        #res_abs = result.abs()
        if rescaling:
            result /= result.norm(p=2,dim=1).max()
            #result = torch.polar(2 * res_abs / res_abs.max(), result.angle())
        return result
    return transformation


def _settings_0(seed):
    direction = ones.clone()
    direction[:,1] = 0
    direction[:,2] = 0
    show_frame = True
    if seed % 3 == 0:
        ebb = 0
        iterations = 1
        flow = 1 / iterations
        rescaling = False
        show_frame = seed == 9999
    elif seed % 3 == 1:
        flow = 0
        iterations = 1
        ebb = 0.87 / iterations
        rescaling = False
        show_frame = False
    elif seed % 3 == 2:
        flow = 0
        ebb = 0
        iterations = 1
        rescaling = True
        show_frame = False
    next_positions = get_transformation(direction, flow, ebb, rescaling)
    schedule(run, None)


frame_index = [0]

mapping = map_space(origin, span, zooms, stretch, scale)
(_, (h,w)) = mapping

rotation_rate = 0#.02
rotation_rate_b = 0#.025

def project(p, c, transform):
    (offset, rotation) = transform
    _p = p - offset
    _p = _p / _p.norm(p=2,dim=1).max()
    _p = (rotation @ _p.transpose(1,0)).transpose(1,0)
    c_filter = _p[:,2] < 99990
    _c = c.clone()
    for d in range(3):
        _c[:,d] *= c_filter
    return (_p[:,0:2].permute(1,0), _c)

scratch = torch.zeros([h, w, 3], device=device, dtype=t_real)

def run():
    global scratch
    for iteration in range(iterations):
        if show_frame:
            c = math.cos(frame_index[0] * rotation_rate)
            s = math.sin(frame_index[0] * rotation_rate)
            rotation_a = torch.tensor([
                [1, 0, 0],
                [0, c,-s],
                [0, s, c]], dtype=torch.double)
            c = math.cos(frame_index[0] * rotation_rate_b)
            s = math.sin(frame_index[0] * rotation_rate_b)
            rotation_b = torch.tensor([
                [c,-s, 0],
                [s, c, 0],
                [0, 0, 1]], dtype=torch.double)
            dist = direction
            offset = flow * dist * (iteration)
            offset -= ebb * dist * iteration

            transform = (offset, rotation_a @ rotation_b)

            frame_index[0] += 1
            p_projected, c_filtered = project(p_positions, p_colors, transform)

            if iteration % 1 == 0:
                scratch *= 0
                insert_at_coords(p_projected, c_filtered, scratch, mapping)
                save(scratch.permute(2,0,1).sqrt(), f"{run_dir}/{frame_index[0]:06d}")

        p_positions.copy_(next_positions(p_positions))

+171  sketch/old/snakepyt_0_0/plume/plume3d_b.py

import math
import time
from pathlib import Path
from random import random
import gc

import torch

from lib.util import *
from lib.spaces import insert_at_coords, map_space, cgrid


name = "plume3"

device = "cuda"
torch.set_default_device(device)
gc.collect()
torch.cuda.empty_cache()

t_real = torch.double
t_complex = torch.cdouble

tau = 3.14159265358979323 * 2

randomize = False
random_range = tau #/ 50

particle_count = 100


# 2 ** 9 = 512; 10 => 1024; 11 => 2048
scale_power = 7
scale = 2 ** scale_power

origin = 0.75, 0.75
span = 0.5, 0.5

stretch = 1, 1

zooms = [
]

#p_positions = (torch.rand([particle_count,2], device=device, dtype=t_real) - (0.5))

#ones = torch.ones_like(p_positions)

def proj(u, v):
    dot = (u * v).sum(dim=2)
    norm_sq = v.norm(p=2, dim=2) ** 2
    zero_mask = norm_sq == 0
    norm_sq += zero_mask
    scale = (dot / norm_sq) * ~zero_mask
    out = v.clone()
    for dim in range(out.shape[2]):
        out[:,:,dim] *= scale
    return out

def proj_shift(a,b):
    return a + proj(b,a)

def proj_shift_(a, b):
    dot = (a * b).sum(dim=2)
    scale = (dot / (b.norm(p=2, dim=2) ** 2))
    for dim in range(a.shape[2]):
        a[:,:,dim] *= 1 + scale
    return a

def get_transformation(direction, flow, ebb, rescaling):
    def transformation(p):
        #if flow == 0:
        #    result = p - direction * ebb
        #else:
        _ebb = torch.outer(ebb, direction).unsqueeze(1).expand(-1, particle_count, -1)
        _flow = torch.outer(flow, direction).unsqueeze(1).expand(-1, particle_count, -1)
        result = proj_shift(p, _flow) - _ebb
        if rescaling:
            result /= result.norm(p=2,dim=1).max()
        return result
    return transformation

def init():
    mapping = map_space(origin, span, zooms, stretch, scale)
    (_, (h,w)) = mapping

    ab = cgrid(mapping)

    image = torch.zeros([h, w, 3], device="cpu", dtype=t_real)

    p_positions = torch.rand([w, particle_count, 2], device=device, dtype=t_real) - 0.5

    schedule(rows, range(h))

def rows(row_index):

    schedule(columns, None)#range(w))

    schedule(out, None)

def out():
    im = image.clone()
    for c in range(3):
        im[:,:,c] = im[:,:,c] > 0
        #im[:,:,c] /= im[:,:,c].max()
    save(image.permute(2,0,1), f"{run_dir}/{frame_index[0]}")
    frame_index[0] += 1

def columns():
    direction = torch.tensor([1,0], dtype=t_real)#ones_like(p_positions)#.clone()
    #direction[:,1] = 0

    iterations = 1000
    ebb = ab[row_index].real
    flow = ab[row_index].imag

    rescaling = True


    next_positions = get_transformation(direction, flow, ebb, rescaling)

    schedule(run, None)



def c_avg(prev, val, n):
    """cumulative average
    prev: previous result (should be zero for n=1)
    val: next value
    n: how many values given so far, including this value"""
    prev_inf_mask = torch.isinf(prev)
    _next = prev + (val - prev) / n
    return torch.nan_to_num(_next) * ~prev_inf_mask + prev * prev_inf_mask


frame_index = [0]

def run():
    global scratch

    lyapunov_avg = torch.zeros([w,particle_count], dtype=t_real)
    diverge_iter = torch.ones([w], dtype=t_real) * torch.inf

    var_avg = torch.zeros([w], dtype=t_real)
    converge_iter = torch.ones([w], dtype=t_real) * torch.inf

    skip = 500

    for iteration in range(iterations):
        _next = next_positions(p_positions)
        if (iteration > skip):
            var = _next.var(dim=(1,2))
            lyapunov_term = (_next - p_positions).norm(p=2,dim=2).log()
            lyapunov_avg = c_avg(lyapunov_avg, lyapunov_term, iteration - skip)
            converge = (var - var_avg) < -(10**5)
            var_avg = c_avg(var_avg, var, iteration - 5)
            if converge.any():
                div_now = converge * (iteration - skip) / (iterations - skip) + torch.nan_to_num(~converge * torch.inf, 0, torch.inf)
                converge_iter = torch.min(converge_iter, div_now)
        p_positions.copy_(_next)

    image[row_index, :, 0] = torch.nan_to_num(converge_iter, 0, 0, 0)
    means = lyapunov_avg.mean(dim=1)
    image[row_index, :, 2] = torch.nan_to_num(means, 0, 0, 0).abs()
    #image[row_index, :, 1] = torch.nan_to_num(means == 0, 0, 0, 0).abs()


    means = var_avg.abs()
    image[row_index, :, 1] = torch.nan_to_num(means, 0, 0, 0).abs()

+172  sketch/old/snakepyt_0_0/plume/plume_c.py

import math
import time
from pathlib import Path
from random import random

import torch

from lib.util import *
from lib.spaces import insert_at_coords, map_space, cgrid


device = "cuda"
torch.set_default_device(device)

t_real = torch.double
t_complex = torch.cdouble

tau = 3.14159265358979323 * 2

randomize = False
random_range = tau #/ 50

# 2 ** 9 = 512; 10 => 1024; 11 => 2048
scale_power = 13
scale = 2 ** scale_power


stretch = 1, 1

zooms = [
]

def proj(u, v):
    dot = (u * v).sum(dim=1)
    scale = (dot / (v.norm(p=2, dim=1) ** 2))
    out = v.clone()
    for dim in range(out.shape[1]):
        out[:,dim] *= scale
    return out

def bad_proj(u, v):
    dot = (u * v).sum(dim=1)
    scale = (dot / v.norm(p=2, dim=1))
    out = v.clone()
    for dim in range(out.shape[1]):
        out[:,dim] *= scale
    return out

def proj_shift(a, b):
    return a + proj(b, a)

def bad_proj_shift(a, b):
    return a + bad_proj(b, a)

def get_transformation(direction, flow, ebb, rescaling):
    def transformation(p):
        d = direction
        d[:,0] += torch.rand_like(direction[:,0]) * 0.0001
        if flow == 0:
            result = p - direction * ebb
        else:
            result = proj_shift(p, d * flow)
            result -= direction * ebb #proj_shift(result, direction * ebb)
        if rescaling:
            result /= result.norm(p=2,dim=1).max()
        return result
    return transformation

def init():
    steps = 1
    schedule(per_t, [i / steps for i in range(steps)])

def eerp(a,b,t):
    return math.pow(b/a, t)*a

def per_t(t):
    origin = 0.0625, -0.15

    #s = eerp(1000, 0.001, t)
    s = 0.25
    x_range = 0.5 * s
    y_range = 1 * s

    span = x_range, y_range
    mapping = map_space(origin, span, zooms, stretch, scale)
    (_, (h,w)) = mapping

    grid = cgrid(mapping)

    scratch = torch.zeros([h, w, 3], device=device, dtype=t_real)
    p_positions = torch.zeros([h*w,2], device=device, dtype=t_real)
    p_positions[:,0] = grid.real.reshape((h*w))
    p_positions[:,1] = grid.imag.reshape((h*w))

    direction = torch.ones_like(p_positions)
    direction[:,0] = 0

    iterations = 1301
    #show_frame = lambda i: i == iterations - 1
    show_frame = lambda i: i % 100 == 0#True
    #show_frame = lambda i: True

    #ebb = 0.804#0.9000469530469531 - 0.001 + 0.002 * t
    #ebb = 1 + 0.4 * t
    #ebb = 0.4
    ebb = 0.83314# + (0.001 * t)
    #ebb = (0.5 + 0.5 * t)#0.804#482 / 600
    #ebb = 0.0 + 0.01 * t #0.9000469530469531 #+ 0.00001 * t #0.9000595404595405
    flow = 1
    rescaling = False


    next_positions = get_transformation(direction, flow, ebb, rescaling)
    schedule(run, None)

frame_index = [0]

def c_avg(prev, val, n):
    """cumulative average
    prev: previous result (should be zero for n=1)
    val: next value
    n: how many values given so far, including this value"""
    prev_inf_mask = torch.isinf(prev)
    _next = prev + (val - prev) / n
    return torch.nan_to_num(_next) * ~prev_inf_mask + prev * prev_inf_mask


def run():
    global scratch

    diff_avg = torch.zeros([h*w], device=device, dtype=t_real)

    for iteration in range(iterations):
        _next = next_positions(p_positions)
        diff = (p_positions - 0).norm(p=2, dim=1)

        diff_avg = c_avg(diff_avg, diff, iteration + 1)
        #diff /= diff.max()

        if show_frame(iteration):
            #diff_avg = diff
            frame_index[0] += 1
            print(f"{frame_index[0]}: ebb={ebb}")

            #pos_reshape = p_positions.reshape((h,w,2))
            #scratch[:,:,0] = pos_reshape[:,:,0] + 0.5
            #scratch[:,:,1] = pos_reshape[:,:,1] + 0.5

            #scratch += 0.5
            #scratch *= 2
            scratch[:,:,2] = diff_avg.nan_to_num().reshape((h,w))
            #scratch[:,:,2] = diff_avg.nan_to_num().reshape((h,w))

            scratch[scratch < 0] = 0

            #scratch[:,:,0] = scratch[:,:,0].pow(2)
            #scratch[:,:,2] = scratch[:,:,2].pow(0.5)

            scratch[:,:,2] -= scratch[:,:,2].mean()
            scratch[:,:,2] /= scratch[:,:,2].std() * 6
            #scratch[:,:,2] /= scratch[:,:,2].max()

            scratch[:,:,2] += 0.5

            #scratch[:,:,1] = diff.reshape((h,w)) / diff.max()

            save(scratch.permute(2,0,1), f"{run_dir}/{frame_index[0]:06d}")

        p_positions.copy_(_next)
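note (not part of the diff): plume3d_b.py and plume_c.py both lean on c_avg, which is the standard incremental running mean, new = prev + (val - prev) / n. a tiny standalone sketch (mine, not repo code) showing it agrees with the plain mean:

# standalone check of the incremental mean used by c_avg
import torch

vals = torch.tensor([3.0, 1.0, 4.0, 1.0, 5.0])
avg = torch.zeros(())
for n, v in enumerate(vals, start=1):
    avg = avg + (v - avg) / n   # same update as c_avg, without the inf/nan masking
assert torch.allclose(avg, vals.mean())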
+146  sketch/old/snakepyt_0_0/seam_carve.py

import torch
from torch.nn.functional import conv2d
from math import tau

from diffusers import AutoencoderKL

from lib.util import load_image_tensor, save, msave

t_fp = torch.double

def persistent():
    vae = AutoencoderKL.from_pretrained(
        "/ssd/0/ml_models/ql/hf-diff/stable-diffusion-xl-base-0.9", subfolder="vae"
    )
    vae.to("cuda")
    vae.to(torch.float)

def decode(l, vae):
    l_dtype = l.dtype
    l = l.unsqueeze(0).to(vae.dtype)
    with torch.no_grad():
        ims = vae.decode(l).sample
    return ims.to(l_dtype)[0]

def encode(im, vae):
    im_dtype = im.dtype
    im = im.unsqueeze(0).to(vae.dtype)
    with torch.no_grad():
        l = vae.encode(im)
    return l[0].mean[0].to(im_dtype)


def init():
    torch.set_default_device("cuda")
    torch.set_default_dtype(t_fp)
    do_vae = False
    shrink = False
    do_horiz = True
    remove = 1000

    im = load_image_tensor("in/seam_code_b.png").to(t_fp).to("cuda")
    if do_vae:
        im = encode(im, vae)
    if do_horiz:
        im = im.transpose(1,2)
    schedule(run, None)

def convolve(field, kernel, padding):
    return conv2d(field.unsqueeze(0), kernel, bias=None, padding=padding, stride=[1])[0]

kernel3 = torch.tensor([
    [[[-1], [0], [1]], [[0], [0], [0]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[-1], [0], [1]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [0], [0]], [[-1], [0], [1]]],
], dtype=t_fp, device="cuda")

kernel4 = torch.tensor([
    [[[-1], [0], [1]], [[0], [0], [0]], [[0], [0], [0]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[-1], [0], [1]], [[0], [0], [0]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [0], [0]], [[-1], [0], [1]], [[0], [0], [0]]],
    [[[0], [0], [0]], [[0], [0], [0]], [[0], [0], [0]], [[-1], [0], [1]]],
], dtype=t_fp, device="cuda")

def run():
    prev_im = im
    _shrink = shrink
    try:
        for iteration in range(remove):
            prev_shrink = _shrink
            _shrink = (iteration % 4) > 0

            prev_im = prev_im.transpose(1,2) if prev_shrink != _shrink else prev_im
            (c, h, w) = prev_im.shape

            if _shrink:
                out = torch.zeros([c, h, w-1])
            else:
                out = torch.zeros([c, h, w+1])



            kernel = kernel3 if c == 3 else kernel4
            edges = convolve(prev_im, kernel, [1,0])
            edges += convolve(prev_im, kernel.transpose(2,3), [0,1])
            energy = edges.norm(p=2, dim=0)

            paths = energy.clone()
            if not _shrink:
                paths += torch.rand_like(energy) * (11 if not do_vae else 100)
            paths_l = torch.roll(paths, 1, dims=1)
            paths_r = torch.roll(paths, -1, dims=1)
            paths_l[0] = torch.inf
            paths_r[-1] = torch.inf
            paths_stack = torch.stack((paths_l, paths, paths_r))
            path_mins = torch.min(paths_stack, dim=0)

            for row_index in range(1,h):
                prev_vals = paths_stack[:, row_index - 1]
                mins, inds = torch.min(prev_vals, dim=0)
                paths_stack[:, row_index] += mins.expand((3,w))

            paths = paths_stack[1]

            vals, ind = torch.min(paths[-1], dim=0)
            min_path = [ind.item()]
            for row_index in range(h-1)[::-1]:
                prev_ind = min_path[-1]
                l_ind = ((prev_ind - 1) + w) % w
                r_ind = (prev_ind + 2) % (w+1)
                if prev_ind == 0 or prev_ind == (w - 1):
                    v, ind = torch.min(paths[row_index,(l_ind,prev_ind,r_ind-1)], dim=0)
                else:
                    v, ind = torch.min(paths[row_index,l_ind:r_ind], dim=0)
                min_path.append((prev_ind + ind.item() - 1 + w) % w)

            for row_index in range(h):
                split_at = min_path[-row_index]
                if _shrink:
                    out[:,row_index,:split_at] = prev_im[:,row_index,:split_at]
                    out[:,row_index,split_at:] = prev_im[:,row_index,(split_at+1):]
                else: # grow
                    out[:,row_index,:split_at] = prev_im[:,row_index,:split_at]
                    out[:,row_index,split_at] = prev_im[:,row_index,split_at]
                    out[:,row_index,(split_at+1):] = prev_im[:,row_index,split_at:]

            prev_im = out
            _do_horiz = not prev_shrink

            if iteration % 1 == 0:
                to_save = decode(out, vae) if do_vae else out
                to_save = to_save.transpose(1,2) if _do_horiz else to_save
                save(to_save, f"{run_dir}/{iteration:06d}")
                #save(out.transpose(1,2), f"{run_dir}/{iteration:06d}")
    except:
        to_save = decode(prev_im, vae) if do_vae else out
        to_save = to_save.transpose(1,2) if do_horiz else to_save
        save(to_save, f"{run_dir}/{iteration:06d}")
        #save(prev_im.transpose(1,2), f"{run_dir}/{iteration:06d}")
        raise

    #out = out.transpose(1,2)
    to_save = decode(out, vae) if do_vae else out
    to_save = to_save.transpose(1,2) if do_horiz else to_save
    save(to_save, f"{run_dir}/out")
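note (not part of the diff): the paths_stack loop in seam_carve.py's run() is roughly the classic seam-carving dynamic program, cost[i][j] = energy[i][j] + min(cost[i-1][j-1], cost[i-1][j], cost[i-1][j+1]), built with rolled tensors instead of per-column indexing. a small standalone sketch of that recurrence (mine, for reference, not repo code):

# standalone sketch of the usual vertical-seam cost DP
import torch

def seam_cost(energy):
    cost = energy.clone()
    h, w = cost.shape
    for i in range(1, h):
        left = torch.roll(cost[i - 1], 1)
        left[0] = torch.inf          # no neighbour past the left edge
        right = torch.roll(cost[i - 1], -1)
        right[-1] = torch.inf        # no neighbour past the right edge
        cost[i] += torch.min(torch.stack((left, cost[i - 1], right)), dim=0).values
    return cost  # argmin of cost[-1] gives the cheapest seam's bottom endpoint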
+8 -2  sketch/webserver.py
···
    except Exception as e:
        return build_response(500, f"Server error: {e}".encode())

-def run():
+def main():
    ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ssl_ctx.load_cert_chain(certfile=SSL_CERT, keyfile=SSL_KEY)
···
    print(f"Serving HTTPS on {HOST}:{PORT}")
    with ssl_ctx.wrap_socket(server, server_side=True) as ssock:
        while True:
-            conn, addr = ssock.accept()
+            try:
+                conn, addr = ssock.accept()
+            except KeyboardInterrupt:
+                raise
+            except:
+                print("exc")
+                continue
            with conn:
                request = conn.recv(4096)
                response = handle_request(request)

-1  webui/content/main.html
···
    <link id="themeLink" rel="stylesheet" type="text/css" href="style/theme/void.css">
    <link rel="stylesheet" type="text/css" href="style/main.css">
    <link rel="stylesheet" type="text/css" href="style/load.css">
-    <link rel="stylesheet" type="text/css" href="style/../../../foo.css">
</head>
<body>
    <script src="main.js"></script>

+117  webui/js/col.js

const sheet = new CSSStyleSheet();
sheet.replaceSync(`
.hsplitter {
    height: 1px;
    background-color: var(--main-faded);
    cursor: row-resize;
    user-select: none;
    -webkit-user-drag: none;
    overflow: visible;
}

.hsplitter::before {
    content: '';
    position: relative;
    display: inline-block;
    top: -6px;
    left: 0;
    width: 100%;
    height: 13px;
    /*background-color: rgba(255, 0, 0, 0.1);*/
}

.target.top {
    padding-bottom: 1rem;
}

.target.bottom {
    padding-top: 1rem;
}

.target.middle {
    padding-top: 1rem;
    padding-bottom: 1rem;
}
`);
document.adoptedStyleSheets = [...document.adoptedStyleSheets, sheet];

export function main(target, n = 2) {
    n = parseInt(n, 10);
    if (!Number.isInteger(n) || n < 2) {
        n = 2;
    }

    const container = document.createElement('div');
    container.style.cssText = 'display: flex; flex-direction: column; height: 100%; width: 100%;';

    const targets = [];
    const splitters = [];

    function createDragHandler(splitter, i) {
        splitter.onmousedown = (e) => {
            e.preventDefault();

            function resizeCallback(e) {
                const containerRect = container.getBoundingClientRect();
                const relativeY = e.clientY - containerRect.top;
                const percent = (relativeY / containerRect.height) * 100;

                // Calculate total height of targets above this splitter
                let topTotal = 0;
                for (let j = 0; j <= i; j++) {
                    if (j === i) {
                        const newHeight = Math.max(1, Math.min(99, percent - topTotal));
                        targets[j].style.height = newHeight + '%';
                    } else {
                        topTotal += parseFloat(targets[j].style.height);
                    }
                }
            }

            function cleanup() {
                document.removeEventListener('mousemove', resizeCallback);
                document.removeEventListener('mouseup', cleanup);
                document.removeEventListener('mouseleave', cleanup);
            }

            document.addEventListener('mousemove', resizeCallback);
            document.addEventListener('mouseup', cleanup);
            document.addEventListener('mouseleave', cleanup);
        };
    }

    for (let i = 0; i < n; i++) {
        const target = document.createElement('div');
        target.style.height = `${100/n}%`;

        if (i === 0) {
            target.className = "target top";
        } else if (i === n - 1) {
            target.className = "target bottom";
            target.style.flex = '1'; // Last one gets flex to handle rounding
            target.style.height = 'auto';
        } else {
            target.className = "target middle";
        }

        targets.push(target);
        container.appendChild(target);

        if (i === n - 1) continue;

        const splitter = document.createElement('div');
        splitter.className = 'hsplitter';
        splitters.push(splitter);
        container.appendChild(splitter);

        createDragHandler(splitter, i);
    }

    target.appendChild(container);

    return {
        targets: targets
    };
}

+9  webui/js/exit.js

+4  webui/js/main.js

+42  webui/js/prompt.js

export function main(target) {
    const container = document.createElement('div');

    const input = document.createElement('input');
    input.type = 'text';
    input.placeholder = '...';

    async function handleLoad() {
        const inputValue = input.value.trim();
        const inputSplit = inputValue.split(/\s+/);
        const moduleName = inputSplit[0];
        const args = inputSplit.slice(1);

        try {
            const module = await import(`/${moduleName}.js`);
            if ("main" in module) {
                const result = await module.main(target, ...args);
                if (result?.targets?.length) {
                    for (const targetElement of result.targets) {
                        main(targetElement);
                    }
                    container.remove();
                }
            }

        } catch (error) {
            console.error('Failed to load module:', error.message);
        }
    }

    input.addEventListener('keypress', async (e) => {
        if (e.key === 'Enter') {
            handleLoad();
        }
    });

    container.appendChild(input);
    target.appendChild(container);
    input.focus();
}

+117  webui/js/row.js

const sheet = new CSSStyleSheet();
sheet.replaceSync(`
.vsplitter {
    width: 1px;
    background-color: var(--main-faded);
    cursor: col-resize;
    user-select: none;
    -webkit-user-drag: none;
    overflow: visible;
}

.vsplitter::before {
    content: '';
    position: relative;
    display: inline-block;
    top: 0;
    left: -6px;
    width: 13px;
    height: 100%;
    /*background-color: rgba(255, 0, 0, 0.1);*/
}

.target.left {
    padding-right: 1rem;
}

.target.right {
    padding-left: 1rem;
}

.target.middle {
    padding-left: 1rem;
    padding-right: 1rem;
}
`);
document.adoptedStyleSheets = [...document.adoptedStyleSheets, sheet];

export function main(target, n = 2) {
    n = parseInt(n, 10);
    if (!Number.isInteger(n) || n < 2) {
        n = 2;
    }

    const container = document.createElement('div');
    container.style.cssText = 'display: flex; height: 100%; width: 100%;';

    const targets = [];
    const splitters = [];

    function createDragHandler(splitter, i) {
        splitter.onmousedown = (e) => {
            e.preventDefault();

            function resizeCallback(e) {
                const containerRect = container.getBoundingClientRect();
                const relativeX = e.clientX - containerRect.left;
                const percent = (relativeX / containerRect.width) * 100;

                // Calculate total width of targets to the left of this splitter
                let leftTotal = 0;
                for (let j = 0; j <= i; j++) {
                    if (j === i) {
                        const newWidth = Math.max(1, Math.min(99, percent - leftTotal));
                        targets[j].style.width = newWidth + '%';
                    } else {
                        leftTotal += parseFloat(targets[j].style.width);
                    }
                }
            }

            function cleanup() {
                document.removeEventListener('mousemove', resizeCallback);
                document.removeEventListener('mouseup', cleanup);
                document.removeEventListener('mouseleave', cleanup);
            }

            document.addEventListener('mousemove', resizeCallback);
            document.addEventListener('mouseup', cleanup);
            document.addEventListener('mouseleave', cleanup);
        };
    }

    for (let i = 0; i < n; i++) {
        const target = document.createElement('div');
        target.style.width = `${100/n}%`;

        if (i === 0) {
            target.className = "target left";
        } else if (i === n - 1) {
            target.className = "target right";
            target.style.flex = '1'; // Last one gets flex to handle rounding
            target.style.width = 'auto';
        } else {
            target.className = "target middle";
        }

        targets.push(target);
        container.appendChild(target);

        if (i === n - 1) continue;

        const splitter = document.createElement('div');
        splitter.className = 'vsplitter';
        splitters.push(splitter);
        container.appendChild(splitter);

        createDragHandler(splitter, i);
    }

    target.appendChild(container);

    return {
        targets: targets
    };
}

+5  webui/readme

old_fic_svelte contains the old .svelte files from the original ficscript UI

the styling and scripts are probably still a useful baseline / reference for a lot of what i want to do with this repo

but i don't want to use svelte, so a little bit of work needs to be done to strip it down to html/css/js