๐Ÿ๐Ÿ๐Ÿ
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

initial commit

+984
+3
.gitignore
# Untracked: git internals, Python bytecode caches, and the image output
# directory the scripts write into ("out/").
.git
__pycache__
out
+115
abelian_sandpile.py
import math
import random

import torch

from util import *


@settings
def _settings():
    # Lattice is one cell larger than the power of two.
    scale = 2 ** 10
    w = scale + 1
    h = scale + 1

    iterations = 500000      # grains dropped
    debug_modulus = 10000    # save a debug frame every N drops

    out_file = "hng"

    device = "cuda"
    ctype = torch.cdouble
    dtype = torch.double


@ifmain(__name__)
@timed
def _main():
    """Abelian-sandpile-style automaton with a custom toppling neighbourhood;
    saves intermediate and final grids as images via util.msave."""
    # FIX: `_settings` is a util._Settings instance, which is not callable --
    # the settings mapping comes from `.get()`, as fluid.py/slime.py do.
    # `_settings()` raised TypeError before.
    _ = _settings.get()
    torch.set_default_device(_.device)
    torch.set_default_dtype(_.dtype)
    dim = [_.h, _.w]

    grid = torch.zeros(dim, dtype=torch.int)

    # Toppling neighbourhood: a cell holding >= len(offsets) grains sends one
    # grain to each offset; [0, 0] keeps one grain in place.
    offsets = [[0, 3], [3, 0], [1, 2], [2, 1], [13, 3], [3, 13],
               [1, 0], [0, 1], [1, 1], [0, 0]]

    max_c = 100        # largest avalanche (topple rounds) seen so far
    c_total = 0        # cumulative topple rounds across all drops
    last_c_total = 100

    n = len(offsets)   # toppling threshold

    for iteration in range(_.iterations):
        p = [0, 0]     # grains are always injected at the corner
        last = torch.clone(grid)
        grid[p[0], p[1]] = n
        peaks = (grid >= n).int()
        c = 0
        # Relax until no cell is at or above the threshold.
        while torch.count_nonzero(peaks):
            c += 1
            # Each toppling cell loses n grains...
            shift = -(n * peaks)
            # ...and each offset receives one grain from it.  torch.roll
            # wraps around, so wrapped rows/columns are zeroed to make the
            # boundary absorbing (open) instead of toroidal.
            for offset in offsets:
                p_shift = torch.roll(peaks, offset, dims=[0, 1])
                if offset[0] < 0:
                    p_shift[offset[0]:] = 0
                else:
                    p_shift[:offset[0]] = 0
                if offset[1] < 0:
                    p_shift[:, offset[1]:] = 0
                else:
                    p_shift[:, :offset[1]] = 0
                shift += p_shift
            grid += shift
            peaks = (grid >= n).int()
        c_total += c
        if c > 20:
            s = "*" if c >= max_c else ""
            print(f"{iteration}: {c}{s}")
            # Snapshot near-record avalanches, plus the state just before the
            # drop that triggered them.
            if c > (0.9 * max_c):
                msave(grid / (n - 1), f"{_.out_file}_{iteration}_{c}")
                msave(last / (n - 1), f"{_.out_file}_{iteration}_prev")
            if c > max_c:
                max_c = c
        if iteration % _.debug_modulus == 0:
            msave(grid / (n - 1), f"{_.out_file}_{iteration}_")
        # Also snapshot whenever the cumulative topple count doubles.
        if c_total > (last_c_total * 2):
            msave(grid / (n - 1), f"{_.out_file}_{iteration}_ct{c_total}")
            last_c_total = c_total

    msave(grid / (n - 1), _.out_file)
+202
fluid.py
import time
from pathlib import Path

import torch
from torch.nn.functional import conv2d as convolve

from util import *


@settings
def _s():
    scale = 2**8
    w = scale
    h = scale

    max_iterations = 10000
    save_every = 10

    v_diffusion_rate = 2000
    m_diffusion_rate = 0.5
    m_timescale = 0.1
    timescale = 0.01

    name = "fluid"

    device = "cuda"


# 4-neighbour sum and central-difference kernels for the (1,1,h,w) conv2d calls.
diffuse_kernel = torch.tensor([[[[0,1,0],[1,0,1],[0,1,0]]]], dtype=torch.float, device="cuda")
project_kernel_x = torch.tensor([[[[0,0,0],[-1,0,1],[0,0,0]]]], dtype=torch.float, device="cuda")
project_kernel_y = torch.tensor([[[[0,-1,0],[0,0,0],[0,1,0]]]], dtype=torch.float, device="cuda")


def continuous_boundary(field):
    """Copy nearest interior values onto the border (for scalar fields);
    corners get the mean of their two adjacent edge cells."""
    field[0] = field[1]
    field[-1] = field[-2]
    field[:,0] = field[:,1]
    field[:,-1] = field[:,-2]
    field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])


def opposed_v_boundary(field):
    """Boundary for the horizontal (column) velocity component: negated at the
    left/right walls (no penetration), copied at top/bottom."""
    field[0] = field[1]
    field[-1] = field[-2]
    field[:,0] = -field[:,1]
    field[:,-1] = -field[:,-2]
    field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])


def opposed_h_boundary(field):
    """Boundary for the vertical (row) velocity component: negated at the
    top/bottom walls (no penetration), copied at left/right."""
    field[0] = -field[1]
    field[-1] = -field[-2]
    field[:,0] = field[:,1]
    field[:,-1] = field[:,-2]
    field[(0,0,-1,-1),(0,-1,0,-1)] = 0.5 * (field[(0,0,-2,-2),(1,-2,0,-1)] + field[(1,1,-1,-1),(0,-1,1,-2)])


def diffuse(field, rate, set_boundary, dt, h, w):
    """Implicit diffusion of a single (h, w) field, solved by 20 Jacobi sweeps:
    result = (field + a * neighbour_sum) / (1 + 4a)."""
    a = dt * rate
    result = torch.clone(field)
    if field.shape != (h, w):
        print("bad field shape in diffuse")
    for n in range(20):
        # conv2d with no padding yields the (h-2, w-2) interior neighbour sum.
        convolution = a * convolve(result.unsqueeze(0), diffuse_kernel, bias=None, padding=[0], stride=[1])[0]
        result[1:h-1,1:w-1] = field[1:h-1,1:w-1] + convolution
        result /= 1 + 4 * a
        set_boundary(result)
    return result


def advect(field, velocities, dt, h, w):
    """Forward (scatter) advection: each interior cell's value is splatted a
    step dt*velocity downstream with bilinear weights.  field is (1, h, w) for
    a scalar (density) or (2, h, w) for the velocity field itself."""
    dth, dtw = dt, dt
    inds_x = torch.arange(1,w-1).repeat(h-2,1).float()
    inds_y = torch.arange(1,h-1).repeat(w-2,1).t().float()
    # Component 0 moves rows (y), component 1 moves columns (x).
    inds_x += dtw * velocities[1,1:h-1,1:w-1]
    inds_y += dth * velocities[0,1:h-1,1:w-1]
    inds_x = inds_x.clamp(1.5, w - 2.5)
    inds_y = inds_y.clamp(1.5, h - 2.5)
    inds_x_i = inds_x.int()
    inds_y_i = inds_y.int()
    inds_x -= inds_x_i
    inds_y -= inds_y_i
    inds_x_inv = 1 - inds_x
    inds_y_inv = 1 - inds_y
    inds_x_i_next = inds_x_i + 1
    inds_y_i_next = inds_y_i + 1
    inds_x_all = torch.stack([inds_x_i, inds_x_i_next, inds_x_i, inds_x_i_next])
    inds_y_all = torch.stack([inds_y_i, inds_y_i, inds_y_i_next, inds_y_i_next])
    if field.shape[0] == 1:
        values = torch.cat([field[:,1:h-1,1:w-1] * inds_x_inv * inds_y_inv,
                            field[:,1:h-1,1:w-1] * inds_x * inds_y_inv,
                            field[:,1:h-1,1:w-1] * inds_x_inv * inds_y,
                            field[:,1:h-1,1:w-1] * inds_x * inds_y])
        res = torch.zeros_like(field[0])
        res.index_put_((inds_y_all, inds_x_all), values, accumulate=True)
        continuous_boundary(res)
        return res.unsqueeze(0)
    else:
        values = torch.stack([field[:,1:h-1,1:w-1] * inds_x_inv * inds_y_inv,
                              field[:,1:h-1,1:w-1] * inds_x * inds_y_inv,
                              field[:,1:h-1,1:w-1] * inds_x_inv * inds_y,
                              field[:,1:h-1,1:w-1] * inds_x * inds_y])
        res = torch.zeros_like(field)
        res[0].index_put_((inds_y_all, inds_x_all), values[:,0,:,:], accumulate=True)
        res[1].index_put_((inds_y_all, inds_x_all), values[:,1,:,:], accumulate=True)
        # FIX: component 0 is the vertical (row) velocity and component 1 the
        # horizontal (column) velocity; the wall conditions were applied to the
        # swapped components here, inconsistent with the diffuse() pairing in
        # _main and with the standard convention (normal component negated at
        # its wall).
        opposed_h_boundary(res[0])
        opposed_v_boundary(res[1])
        return res


def project(field, h, w):
    """Make the (2, h, w) velocity field (approximately) divergence-free by
    solving a pressure Poisson equation with 40 Jacobi sweeps and subtracting
    the pressure gradient."""
    hx = -1# / w
    hy = -1# / h
    divergence = convolve(field[1].unsqueeze(0), project_kernel_x, bias=None, stride=[1], padding=[0])[0] * hx
    divergence += convolve(field[0].unsqueeze(0), project_kernel_y, bias=None, stride=[1], padding=[0])[0] * hy
    divergence *= 0.5
    continuous_boundary(divergence)
    p = torch.zeros_like(field[0])
    for i in range(40):
        p[1:h-1,1:w-1] = (divergence + convolve(p.unsqueeze(0), diffuse_kernel, bias=None, stride=[1], padding=[0])[0]) / 4
        continuous_boundary(p)
    # hx/hy are negative, so += here is a gradient subtraction.
    field[1,1:h-1,1:w-1] += 0.5 * convolve(p.unsqueeze(0), project_kernel_x, bias=None, stride=[1], padding=[0])[0] / hx
    field[0,1:h-1,1:w-1] += 0.5 * convolve(p.unsqueeze(0), project_kernel_y, bias=None, stride=[1], padding=[0])[0] / hy
    # FIX: same component swap as in advect() -- match the diffuse() pairing.
    opposed_h_boundary(field[0])
    opposed_v_boundary(field[1])


@ifmain(__name__, _s)
@timed
def _main(settings):
    """Stam-style stable-fluid loop: inject jets of velocity and density, then
    diffuse / project / advect each step, saving periodic density frames."""
    globals().update(settings.get())
    run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
    Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
    # Archive the settings alongside the run's output.
    with open(f"out/{run_dir}/settings.py", "w") as f:
        f.write(settings.src)
    torch.set_default_device(device)

    # Coarse random velocity field, upsampled to full resolution.
    upsample = torch.nn.Upsample(size=[h, w], mode='bilinear')
    velocities = torch.randn([2, h//32, w//32]) * 100
    densities = torch.ones([1, h, w]) * 0.1

    cg = cgrid(h, w, 0 + 0j, 4 + 4j, dtype=torch.float, ctype=torch.cfloat)

    velocities = upsample(velocities.unsqueeze(0))[0]

    initial = torch.cat((velocities * 0.5 + 0.5, densities), dim=0)
    save(initial, f"{run_dir}/initial")
    del initial

    border_mask = torch.ones([2,h,w], dtype=torch.int)
    border_mask[:,1:h-1,1:w-1] = 0

    for iteration in range(max_iterations):
        # Inflow jets plus a small constant drift and ambient density source.
        velocities[0,3 * h//4 - 10:(3*h//4),w//2-10:w//2+10] = -100
        velocities[1,3 * h//4:(3*h//4)+10,w//2-10:w//2+10] = -90
        velocities[0,h//4:(h//4)+10,w//2-3:w//2+3] = 100
        velocities[1,h//4:(h//4)+10,w//2-3:w//2+3] = 200
        velocities[0] += 0.01
        densities[0,3*h//4-10:(3*h//4),w//2-9:w//2+9] += 0.1
        densities += 0.001
        velocities[0] = diffuse(velocities[0], v_diffusion_rate, opposed_h_boundary, timescale, h, w)
        velocities[1] = diffuse(velocities[1], v_diffusion_rate, opposed_v_boundary, timescale, h, w)
        project(velocities, h, w)
        velocities = advect(velocities, velocities, timescale, h, w)
        project(velocities, h, w)
        densities = diffuse(densities[0], m_diffusion_rate, continuous_boundary, m_timescale, h, w).unsqueeze(0)
        densities = advect(densities, velocities, m_timescale, h, w)
        densities *= 0.999
        if iteration % save_every == 0:
            msave(densities[0], f"{run_dir}/_{iteration}")

    final = torch.cat((velocities * 0.5 + 0.5, densities), dim=0)
    save(final, f"{run_dir}/final")
+156
fractal.py
import time
import torch
from PIL import Image
from util import *

import math

# ---- parameters -----------------------------------------------------------

# Render size (square).
w = 512 * 2
h = w

# Viewport in the complex plane.
x_start = -math.e / 2
x_span = 1.54
y_start = -2
y_span = 2

# Scale factor in the iterated map z -> exp(-alpha * z) + c.
alpha = 2.5

x_range = [x_start, x_start + x_span]
y_range = [y_start, y_start + y_span]

final_file = "gauss_1"

device = "cuda"
ctype = torch.cfloat
dtype = torch.float

# Monte-Carlo supersampling: random samples per pixel and their spread.
sample_ct = 40
noise_scale = x_span / (2 * w)

# Optional blocky per-chunk jitter added at every iteration.
do_jitter = False
jitter_scale = 2.5 / w
jitter_chunk_size = 128

# Iterations of the recurrence, and the escape radius.
iterations = 120
limit = 2.0

if __name__ == "__main__":
    t0 = time.perf_counter()

    z_init = torch.zeros([h, w], dtype=ctype, device=device)
    c = torch.zeros([h, w], dtype=ctype, device=device)
    j = torch.zeros([h, w], dtype=ctype, device=device)
    j.imag = torch.ones([h, w], dtype=dtype, device=device)

    yspace = torch.linspace(y_range[0], y_range[1], h, dtype=dtype, device=device)
    xspace = torch.linspace(x_range[0], x_range[1], w, dtype=dtype, device=device)

    # Broadcast the two axes into a complex coordinate grid: x along rows'
    # real part, y along columns' imaginary part.
    for row in range(h):
        c[row] += xspace
    for col in range(w):
        c[:, col] += yspace * 1j

    totals = torch.zeros([h, w], dtype=torch.int, device=device)

    jitter = torch.randn([h // jitter_chunk_size, w // jitter_chunk_size], dtype=ctype, device=device) * jitter_scale
    if do_jitter:
        # Upsample the per-chunk jitter to full resolution (nearest keeps it
        # deliberately blocky); round-trip through view_as_real since Upsample
        # does not handle complex tensors.
        upsample = torch.nn.Upsample(size=[h, w], mode='nearest')
        jitter = torch.view_as_real(jitter).permute((2, 0, 1))[None, ...]
        jitter = torch.view_as_complex(upsample(jitter)[0].permute((1, 2, 0)).contiguous())

    for sample in range(sample_ct):
        noise = torch.randn([h, w], dtype=ctype, device=device)
        z = z_init
        # Perturb c (not z) for this sample's anti-aliasing pass.
        c_n = c + noise * noise_scale
        mask = torch.abs(z) < limit
        # Escape-time counter; uint8 is fine while iterations <= 255.
        escape_steps = torch.zeros_like(mask, dtype=torch.uint8)
        for _ in range(iterations):
            z = torch.exp(-alpha * z) + c_n
            if do_jitter:
                z += jitter
            mask &= (torch.abs(z) < limit)
            escape_steps += mask
        totals += escape_steps

    # Normalize to [0, 1] and write the image plus an inverted sqrt variant.
    totals = totals / sample_ct / iterations
    msave(totals, final_file + "_")

    totals = 1. - totals
    msave(torch.sqrt(totals), final_file + "_inv_rt")

    print(f"done in {time.perf_counter() - t0}s")
+125
lyapunov.py
import torch
import time
import itertools

from PIL import Image


# monochrome image writer, values clamped to [0, 1]
def mpilify(z):
    stacked = torch.stack((z, z, z))
    clamped = stacked.clamp(0, 1)
    arr = clamped.detach().cpu().permute(1, 2, 0).numpy()
    return Image.fromarray((arr * 255).round().astype("uint8"))


def msave(x, f):
    mpilify(x).save(f"out/{f}.png")


# ---- config ---------------------------------------------------------------

final_file = "abaaa_squish_tall2"

# 2 ** 9 = 512; 10 => 1024; 11 => 2048
scale = 2 ** 9
h = scale * 2
w = scale

# Window in (a, b) parameter space: lower-left corner and extents.
origin = 3 + (1 / 3), 2
span = 1 / 3, 2

zooms = []

dev = "cuda"
t_fp = torch.double
t_cfp = torch.cdouble

# Forcing sequence; 'C' entries use the fixed parameter c.
seq = "ABAAAAAAAA"
c = 3.5

iterations = 100


# ---- funcs ----------------------------------------------------------------

def logistic_map(r, x):
    # One logistic step r*x*(1-x).  Cells where x already blew up to +/-inf
    # propagate -x instead; NaNs elsewhere are squashed to 0.
    blown = torch.isinf(x)
    stepped = r * x * (1 - x)
    return torch.nan_to_num(stepped) * ~blown - x * blown


def lyapunov_term(r, x):
    # log|d/dx (r x (1-x))| = log|r (1 - 2x)|, with the same inf handling as
    # logistic_map.
    blown = torch.isinf(x)
    term = torch.log(torch.abs(r * (1 - 2 * x)))
    return torch.nan_to_num(term) * ~blown - x * blown


def repeat(seq):
    # Yield seq's elements cyclically, forever.
    l = len(seq)
    for i in itertools.count():
        yield seq[i % l]


def c_avg(prev, val, n):
    """cumulative average
    prev: previous result (should be zero for n=1)
    val: next value
    n: how many values given so far, including this value"""
    blown = torch.isinf(prev)
    updated = prev + (val - prev) / n
    return torch.nan_to_num(updated) * ~blown + prev * blown


# ---- init -----------------------------------------------------------------

for z in zooms:
    # TODO shift & scale
    pass

x_min = origin[0]
y_min = origin[1]
x_max = origin[0] + span[0]
y_max = origin[1] + span[1]


def main():
    """Render a Lyapunov-exponent fractal for the forced logistic map over the
    configured (a, b) window and save a sqrt-mapped image."""
    x = torch.ones([h, w], device=dev, dtype=t_fp) / 2
    ab = torch.zeros([h, w], device=dev, dtype=t_cfp)

    lyapunov_avg = torch.zeros([h, w], device=dev, dtype=t_fp)

    yspace = torch.linspace(y_min, y_max, h, dtype=t_fp, device=dev)
    xspace = torch.linspace(x_min, x_max, w, dtype=t_fp, device=dev)
    # a lives in the real part (varies along columns), b in the imaginary
    # part (varies along rows).
    for row in range(h):
        ab[row] += xspace
    for col in range(w):
        ab[:, col] += yspace * 1j

    forcing = itertools.islice(repeat(seq), iterations)

    for idx, seq_elem in enumerate(forcing):
        if seq_elem == "C":
            r = c
        else:
            r = ab.real if seq_elem == "A" else ab.imag

        # The very first point contributes no derivative term.
        if idx > 0:
            lyapunov_avg = c_avg(lyapunov_avg, lyapunov_term(r, x), idx)

        if idx < iterations - 1:
            x = logistic_map(r, x)

    # Squash the exponent into [0, 1] for display.
    result = torch.tanh(lyapunov_avg) / 2 + 0.5
    msave(torch.sqrt(result), final_file + "sqrt_p")


if __name__ == "__main__":
    t0 = time.perf_counter()
    main()
    print(f"done in {time.perf_counter() - t0}s")
+151
raytrace.py
import torch
import time
from math import sin, cos

from util import msave

out_file = "qjulia_11_40"

# 2**10 = 1024
scale = 2 ** 11
w = 2 * scale // 3
h = scale

max_steps = 500
mandel_iters = 20
max_samples = 4

noise_scale = 1. / (2 * scale)

zoom = 1
# Camera: rays start on the plane z = -zoom and march along step_dir.
frame_center = 0, 0, -1 * zoom
step_dir = 0, 0, 1
frame_span = 2 * zoom, 3 * zoom
max_depth = 2 * zoom

dev = "cuda"
t_cfp = torch.cfloat
t_fp = torch.float

pi = 3.14159265358979323
a, b, c = pi / 3, pi + 0.2834298, pi / 4

sina, sinb, sinc = sin(a), sin(b), sin(c)
cosa, cosb, cosc = cos(a), cos(b), cos(c)

# Elementary rotation matrices; composed into a single scene rotation below.
rot_x = torch.tensor([
    [1, 0, 0],
    [0, cosc, -sinc],
    [0, sinc, cosc]], dtype=t_fp, device=dev)
rot_y = torch.tensor([
    [cosb, 0, sinb],
    [0, 1, 0],
    [-sinb, 0, cosb]], dtype=t_fp, device=dev)
rot_z = torch.tensor([
    [cosa, -sina, 0],
    [sina, cosa, 0],
    [0, 0, 1]], dtype=t_fp, device=dev)

rot = rot_z @ rot_y @ rot_x

# Quaternion Julia parameters used by hit_3.
quat_last = 0
quat_c = [-1, 0.2, 0, 0]


def hit_0(pos):
    # 2-d slice: iterate z -> z^2 + pos_z * z + c with c from the (x, y) plane.
    z = 0
    c = torch.view_as_complex(pos[0:2].permute(1, 2, 0).contiguous())
    for _ in range(mandel_iters):
        z = z**2 + pos[2] * z + c
    return torch.abs(z) < 2


def hit_1(pos):
    # "Mandelbulb-ish" degree-9 iteration on rotated coordinates; returns the
    # COMPLEMENT of the bounded set.
    v = (pos.permute(1, 2, 0) @ rot).permute(2, 0, 1)
    for _ in range(mandel_iters):
        x, y, z = v[0], v[1], v[2]
        x_term = 1j * torch.sqrt(y**2 + z**2)
        y_term = 1j * torch.sqrt(x**2 + z**2)
        z_term = 1j * torch.sqrt(y**2 + x**2)
        v[0] = (x + x_term)**9 / 2 + (x - x_term)**9 / 2 + pos[0]
        v[1] = (y + y_term)**9 / 2 + (y - y_term)**9 / 2 + pos[1]
        v[2] = (z + z_term)**9 / 2 + (z - z_term)**9 / 2 + pos[2]
    return ~(torch.norm(v, dim=0) < 2)


def hit_3(pos):
    # Quaternion Julia set: q -> q^2 + quat_c, seeded from the rotated sample
    # position with a constant fourth component.
    v = (pos.permute(1, 2, 0) @ rot).permute(2, 0, 1)
    v = torch.cat((v, torch.ones([1, h, w], dtype=t_fp, device=dev) * quat_last))
    for _ in range(mandel_iters):
        r, qa, qb, qc = v[0], v[1], v[2], v[3]
        sq_r = r*r - qa*qa - qb*qb - qc*qc
        sq_a = 2*r*qa
        sq_b = 2*r*qb
        sq_c = 2*r*qc
        v[0] = sq_r + quat_c[0]
        v[1] = sq_a + quat_c[1]
        v[2] = sq_b + quat_c[2]
        v[3] = sq_c + quat_c[3]
    return torch.norm(v, dim=0) < 4


def hit_2(pos):
    return torch.norm(pos, dim=0) > 0


def main():
    """March rays through the hit_3 field, averaging a first-hit depth image
    (s) and a hit-density image (v) over several jittered samples."""
    start = torch.zeros([h, w], device=dev, dtype=t_cfp)
    res = torch.ones([h, w], device=dev, dtype=t_fp)

    half_x = frame_span[0] / 2
    half_y = frame_span[1] / 2
    yspace = torch.linspace(-half_y, half_y, h, dtype=t_fp, device=dev)
    xspace = torch.linspace(-half_x, half_x, w, dtype=t_fp, device=dev)
    for row in range(h):
        start[row] += xspace
    for col in range(w):
        start[:, col] += yspace * 1j

    # (3, h, w) ray origins relative to the camera position.
    start = torch.stack((start.real - frame_center[0],
                         start.imag - frame_center[1],
                         torch.ones([h, w], dtype=t_fp, device=dev) * frame_center[2]))

    step = torch.stack([torch.ones([h, w], device=dev, dtype=t_fp) * x for x in step_dir]) * max_depth / max_steps

    s_res = []
    v_res = []
    for sample in range(max_samples):
        # Jitter every ray origin slightly for anti-aliasing.
        pos = start + torch.randn([3, h, w], device=dev, dtype=t_fp) * noise_scale
        s_res.append(torch.ones_like(res))
        v_res.append(torch.zeros_like(res))
        for step_idx in range(max_steps):
            pos += step
            hit = hit_3(pos)
            # Depth buffer: only rays without a nearer recorded hit update.
            hit_adj = hit * (s_res[sample] > (step_idx / max_steps))
            hit_val = hit_adj * (step_idx / max_steps)
            # Density: every hit along the ray contributes equally.
            v_res[sample] = v_res[sample] + hit * (1. / max_steps)
            s_res[sample] = (s_res[sample] * ~hit_adj) + hit_val
            if step_idx % 10 == 999:  # leftover debug trace; never fires
                print(step_idx)
                print(hit.sum())
                print(hit_adj.sum())
                print(s_res[sample].sum())

    s_res = torch.stack(s_res).mean(dim=0)
    v_res = torch.stack(v_res).mean(dim=0)

    msave(1 - s_res, f"{out_file}_s")
    msave(v_res, f"{out_file}_v")


if __name__ == "__main__":
    t0 = time.perf_counter()
    main()
    print(f"done in {time.perf_counter() - t0}")
+124
slime.py
import time
import inspect
from random import randint
from pathlib import Path

import torch
from torchvision.transforms import GaussianBlur

from util import *


@settings
def _s():
    from math import tau

    name = "slime"

    scale = 2 ** 11
    w = scale
    h = scale

    max_iterations = 1000
    save_mod = 100

    particle_count = 1000000 // 1
    decay_rate = 0.9

    view_angle = tau / 8
    view_distance = 1.414 * 100

    direction_count = 4
    turn_amount = tau / direction_count * 1.2
    move_distance = 1.414 * 3

    blur_size = 5
    blur_sigma = 1

    dtype = torch.double
    ctype = torch.cdouble
    device = "cuda"


def offset_coord(position, angle, distance):
    """Return a copy of (N, 2) `position` translated `distance` along heading
    `angle`: row component from sin, column component from cos."""
    moved = torch.clone(position)
    moved[:, 0] += (torch.sin(angle) * distance)
    moved[:, 1] += (torch.cos(angle) * distance)
    return moved


def clamp_coords(t, h, w):
    # Wrap (N, 2) coordinates toroidally into [0, h-1) x [0, w-1), in place.
    t[:, 0] = (t[:, 0] + (h - 1)) % (h - 1)
    t[:, 1] = (t[:, 1] + (w - 1)) % (w - 1)


def idxput(tensor, indices, value):
    # Scatter `value` into `tensor` at the given (row, col) index pairs.
    tensor.index_put_((indices[:, 0], indices[:, 1]), value)


@ifmain(__name__, _s)
@timed
def _main(settings):
    """Physarum-style slime simulation: particles sense a trail map, turn
    toward stronger trails, move, deposit, and the map is blurred and decayed
    each step.  Frames are saved periodically."""
    globals().update(settings.get())
    run_dir = time.strftime(f"%d.%m.%Y/{name}_t%H.%M.%S")
    Path("out/" + run_dir).mkdir(parents=True, exist_ok=True)
    # Archive this run's settings next to its output.
    with open(f"out/{run_dir}/settings.py", "w") as f:
        f.write(settings.src)
    torch.set_default_device(device)

    blur = GaussianBlur(blur_size, blur_sigma)

    # p_ denotes per-particle state.
    p_positions = torch.rand([particle_count, 2])
    p_positions[:, 0] *= h
    p_positions[:, 1] *= w

    # Headings quantized to direction_count values, spread over two turns.
    p_directions = ((torch.rand(particle_count) * direction_count).floor() / direction_count) * 2 * tau

    world = torch.zeros([3, h, w], dtype=dtype)
    scratch = torch.zeros([3, h, w], dtype=dtype)
    t0 = torch.tensor([0], dtype=dtype)
    t1 = torch.tensor([1], dtype=dtype)

    for iteration in range(max_iterations):

        # sense: sample the trail ahead-left, ahead, ahead-right
        headings = (p_directions + da for da in (-view_angle, 0, view_angle))
        sc = [offset_coord(p_positions, hdg, view_distance) for hdg in headings]
        for coords in sc:
            clamp_coords(coords, h, w)

        sci = [coords.long() for coords in sc]

        senses = [world[0].take(coords[:, 0] * w + coords[:, 1]) for coords in sci]
        sense_neg = senses[0] > senses[1]
        sense_pos = senses[2] > senses[1]
        sense_out = sense_neg * sense_pos   # both sides beat the forward sensor
        sense_neg = sense_neg * ~sense_out  # only the -angle side is stronger
        sense_pos = sense_pos * ~sense_out  # only the +angle side is stronger

        # rotate: two-sided ties break randomly, else turn toward the
        # stronger side
        coin = torch.rand(particle_count) > 0.5
        p_directions += coin * sense_out * turn_amount
        p_directions -= ~coin * sense_out * turn_amount
        p_directions += sense_pos * turn_amount
        p_directions -= sense_neg * turn_amount

        # move
        p_positions = offset_coord(p_positions, p_directions, move_distance)
        clamp_coords(p_positions, h, w)

        # deposit
        idxput(world[0], p_positions.long(), t1)

        # diffuse
        world = blur(world)

        # render
        if iteration % save_mod == 0 and iteration != 0:
            msave(world[0], f"{run_dir}/{iteration}")

        # decay
        if iteration < max_iterations - 1:
            world *= decay_rate

    msave(world[0], f"{run_dir}/final")
+108
util.py
import torch
from PIL import Image


class Settings(dict):
    """dict with attribute-style access; holds the variables captured from a
    @settings-decorated function body."""
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __init__(self, _vars):
        for key in _vars:
            self[key] = _vars[key]


class _Settings():
    """Wrapper returned by @settings: `.get()` builds a fresh Settings mapping,
    `.src` holds the rewritten source so runs can archive their settings."""
    def __init__(self, _get, _src):
        self.get = _get
        self.src = _src

    # Backward-compatible convenience: calling the wrapper is an alias for
    # .get() (some scripts write `_settings()` instead of `_settings.get()`).
    def __call__(self, *args, **kwargs):
        return self.get(*args, **kwargs)


def settings(f):
    """Decorator: rewrite f so that its body's local variables are returned as
    a Settings mapping; returns a _Settings wrapper carrying the rewritten
    source text.

    NOTE: relies on exec/eval sharing this frame's locals to pick up the
    rewritten function -- fragile, but only ever applied to trusted,
    in-repo settings functions.
    """
    import inspect
    f_src = inspect.getsource(f)
    f_lines = f_src.split('\n')
    f_lines = [l for l in f_lines if f"@settings" not in l]
    f_lines.append("    return Settings(vars())")
    new_src = "\n".join(f_lines)
    exec(new_src)
    return _Settings(eval(f.__name__), new_src)


def first(l, p):
    """Return (index, value) for the first element of l satisfying predicate p.
    Raises StopIteration when nothing matches."""
    return next((idx, value) for idx, value in enumerate(l) if p(value))


def timed(f):
    """Decorator: print f's wall-clock runtime after each call.

    FIX: the wrapper now returns f's result (it was silently discarded) and
    uses functools.wraps so the wrapped function keeps its name/docstring.
    """
    import functools
    import time

    @functools.wraps(f)
    def _f(*args, **kwargs):
        t0 = time.perf_counter()
        result = f(*args, **kwargs)
        print(f"{f.__name__}: {time.perf_counter() - t0}s")
        return result
    return _f


def ifmain(name, provide_arg=None):
    """Decorator factory: when `name` is "__main__", immediately call the
    decorated function (optionally passing provide_arg); otherwise return the
    function unchanged."""
    if name == "__main__":
        if provide_arg is None:
            return lambda f: f()
        return lambda f: f(provide_arg)
    return lambda f: f


def cpilify(z):
    """Complex (h, w) tensor -> RGB image: real part in red, imaginary part in
    green, values mapped from [-1, 1] onto [0, 1] and clamped."""
    z_3d = torch.stack([z.real, z.imag, torch.zeros_like(z.real)])
    z_norm = (z_3d / 2 + 0.5).clamp(0, 1)
    z_np = z_norm.detach().cpu().permute(1, 2, 0).numpy()
    z_bytes = (z_np * 255).round().astype("uint8")
    return Image.fromarray(z_bytes)


# complex, from -1 to 1 & -i to i
def csave(x, f):
    cpilify(x).save(f"out/{f}.png")


# monochrome, 0 to 1
def mpilify(z):
    """Monochrome (h, w) tensor in [0, 1] -> greyscale RGB image."""
    z_3d = torch.stack([z, z, z])
    z_norm = z_3d.clamp(0, 1)
    z_np = z_norm.detach().cpu().permute(1, 2, 0).numpy()
    z_bytes = (z_np * 255).round().astype("uint8")
    return Image.fromarray(z_bytes)


def msave(x, f):
    mpilify(x).save(f"out/{f}.png")


# 3 channels
def pilify(z):
    """(3, h, w) tensor in [0, 1] -> RGB image."""
    z_norm = z.clamp(0, 1)
    z_np = z_norm.detach().cpu().permute(1, 2, 0).numpy()
    z_bytes = (z_np * 255).round().astype("uint8")
    return Image.fromarray(z_bytes)


def save(x, f):
    pilify(x).save(f"out/{f}.png")


# grid of complex numbers
def cgrid(h, w, center, span, ctype=torch.cdouble, dtype=torch.double, **_):
    """Return an (h, w) complex grid spanning `span` around `center`: the real
    axis varies along columns, the imaginary axis along rows.  Extra keyword
    arguments (e.g. a whole settings dict) are ignored."""
    g = torch.zeros([h, w], dtype=ctype)

    low = center - span / 2
    hi = center + span / 2

    yspace = torch.linspace(low.imag, hi.imag, h, dtype=dtype)
    xspace = torch.linspace(low.real, hi.real, w, dtype=dtype)

    for _x in range(h):
        g[_x] += xspace
    for _y in range(w):
        g[:, _y] += yspace * 1j

    return g


# result, iterations; iterations == -1 if no convergence before limit
def gauss_seidel(a, b):
    """Solve a @ x = b by Gauss-Seidel iteration.

    Returns (x, iterations); iterations == -1 when the method did not converge
    within the iteration limit."""
    x = torch.zeros_like(b)
    itlim = 1000
    for it in range(1, itlim):
        xn = torch.zeros_like(x)
        for i in range(a.shape[0]):
            s1 = a[i, :i].dot(xn[:i])
            s2 = a[i, i+1:].dot(x[i+1:])
            xn[i] = (b[i] - s1 - s2) / a[i, i]
        if torch.allclose(x, xn, rtol=1e-8):
            return xn, it
        x = xn
    return x, -1