A modern Music Player Daemon based on Rockbox open source high quality audio player
libadwaita
audio
rust
zig
deno
mpris
rockbox
mpd
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2022 Aidan MacDonald
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
21
#include "linuxboot.h"
#include "system.h"
#include "core_alloc.h"
#include "crc32.h"
#include "inflate.h"
#include "file.h"
#include <string.h>
29
/* compression support options - can be decided per target if needed,
 * for now default to enabling everything */
#define HAVE_UIMAGE_COMP_NONE
#define HAVE_UIMAGE_COMP_GZIP

/* Internal error codes. All are negative so they can never collide with
 * the non-negative buflib handles returned by uimage_load() on success. */
enum {
    E_OUT_OF_MEMORY = -1,           /* buflib allocation failed */
    E_BUFFER_OVERFLOW = -2,         /* output buffer too small for image */
    E_MAGIC_MISMATCH = -3,          /* not a uImage (bad IH_MAGIC) */
    E_HCRC_MISMATCH = -4,           /* header CRC check failed */
    E_DCRC_MISMATCH = -5,           /* payload (data) CRC check failed */
    E_UNSUPPORTED_COMPRESSION = -6, /* compression type not compiled in */
    E_READ = -7,                    /* reader callback reported an error */
    E_INFLATE = -8,                 /* gzip decompression failed */
    E_INFLATE_UNCONSUMED = -9,      /* stream ended before payload did */
};
46
/* Incremental uImage CRC (U-Boot convention): same polynomial as
 * crc_32r but with the state pre- and post-inverted, kept in the
 * byte order crc_32r expects. Feed 0 as the initial value. */
uint32_t uimage_crc(uint32_t crc, const void* data, size_t size)
{
    uint32_t state = htole32(crc ^ 0xffffffff);
    state = crc_32r(data, size, state);
    return letoh32(state) ^ 0xffffffff;
}
51
52uint32_t uimage_calc_hcrc(const struct uimage_header* uh)
53{
54 struct uimage_header h = *uh;
55 uimage_set_hcrc(&h, 0);
56 return uimage_crc(0, &h, sizeof(h));
57}
58
59static int uimage_check_header(const struct uimage_header* uh)
60{
61 if(uimage_get_magic(uh) != IH_MAGIC)
62 return E_MAGIC_MISMATCH;
63
64 if(uimage_get_hcrc(uh) != uimage_calc_hcrc(uh))
65 return E_HCRC_MISMATCH;
66
67 return 0;
68}
69
70static int uimage_alloc_state(const struct uimage_header* uh)
71{
72 size_t size;
73
74 switch(uimage_get_comp(uh)) {
75#ifdef HAVE_UIMAGE_COMP_NONE
76 case IH_COMP_NONE:
77 return 0;
78#endif
79
80#ifdef HAVE_UIMAGE_COMP_GZIP
81 case IH_COMP_GZIP:
82 size = inflate_size + inflate_align - 1;
83 return core_alloc_ex(size, &buflib_ops_locked);
84#endif
85
86 default:
87 return E_UNSUPPORTED_COMPRESSION;
88 }
89}
90
91#ifdef HAVE_UIMAGE_COMP_GZIP
/* Context threaded through uimage_inflate_reader(): tracks the running
 * data CRC and the payload boundary while streaming compressed bytes
 * from the caller's reader into the inflater. */
struct uimage_inflatectx
{
    uimage_reader reader;   /* caller-supplied source of compressed bytes */
    void* rctx;             /* opaque context forwarded to 'reader' */
    uint32_t dcrc;          /* running CRC of the compressed payload */
    size_t remain;          /* payload bytes not yet handed to inflate */
    int err;                /* sticky error (E_READ) set on reader failure */
};
100
101static uint32_t uimage_inflate_reader(void* block, uint32_t block_size, void* ctx)
102{
103 struct uimage_inflatectx* c = ctx;
104 ssize_t len = c->reader(block, block_size, c->rctx);
105 if(len < 0) {
106 c->err = E_READ;
107 return 0;
108 }
109
110 len = MIN(c->remain, (size_t)len);
111 c->remain -= len;
112 c->dcrc = uimage_crc(c->dcrc, block, len);
113 return len;
114}
115
/* Decompress a gzip-compressed uImage payload into 'out'.
 *
 * uh:       validated image header (payload size, expected data CRC)
 * state_h:  buflib handle from uimage_alloc_state() holding the
 *           inflate scratch buffer
 * out:      destination buffer
 * out_size: in: capacity of 'out'; out: bytes actually produced
 * reader:   source of the compressed payload
 * rctx:     opaque context passed to 'reader'
 *
 * Returns 0 on success or a negative E_xxx code. */
static int uimage_decompress_gzip(const struct uimage_header* uh, int state_h,
                                  void* out, size_t* out_size,
                                  uimage_reader reader, void* rctx)
{
    /* Align the over-allocated scratch buffer for the inflate state */
    size_t hbufsz = inflate_size + inflate_align - 1;
    void* hbuf = core_get_data(state_h);
    ALIGN_BUFFER(hbuf, hbufsz, inflate_align);

    struct uimage_inflatectx r_ctx;
    r_ctx.reader = reader;
    r_ctx.rctx = rctx;
    r_ctx.dcrc = 0;
    r_ctx.remain = uimage_get_size(uh);
    r_ctx.err = 0;

    /* note: void* arithmetic here is a GCC extension */
    struct inflate_bufferctx w_ctx;
    w_ctx.buf = out;
    w_ctx.end = out + *out_size;

    int ret = inflate(hbuf, INFLATE_GZIP,
                      uimage_inflate_reader, &r_ctx,
                      inflate_buffer_writer, &w_ctx);
    if(ret) {
        if(r_ctx.err)
            return r_ctx.err;
        else if(w_ctx.end == w_ctx.buf)
            /* NOTE(review): assumes inflate_buffer_writer signals a
             * full buffer by leaving buf == end -- confirm against
             * the inflate.h contract */
            return E_BUFFER_OVERFLOW;
        else
            /* note: this will likely mask DCRC_MISMATCH errors */
            return E_INFLATE;
    }

    if(r_ctx.remain > 0)
        return E_INFLATE_UNCONSUMED;
    if(r_ctx.dcrc != uimage_get_dcrc(uh))
        return E_DCRC_MISMATCH;

    /* NOTE(review): relies on the writer's buf/end bookkeeping such
     * that end - buf equals the decompressed length on success */
    *out_size = w_ctx.end - w_ctx.buf;
    return 0;
}
156#endif /* HAVE_UIMAGE_COMP_GZIP */
157
158static int uimage_decompress(const struct uimage_header* uh, int state_h,
159 void* out, size_t* out_size,
160 uimage_reader reader, void* rctx)
161{
162 size_t in_size = uimage_get_size(uh);
163 ssize_t len;
164
165 switch(uimage_get_comp(uh)) {
166#ifdef HAVE_UIMAGE_COMP_NONE
167 case IH_COMP_NONE:
168 if(*out_size < in_size)
169 return E_BUFFER_OVERFLOW;
170
171 len = reader(out, in_size, rctx);
172 if(len < 0 || (size_t)len != in_size)
173 return E_READ;
174
175 if(uimage_crc(0, out, in_size) != uimage_get_dcrc(uh))
176 return E_DCRC_MISMATCH;
177
178 *out_size = in_size;
179 break;
180#endif
181
182#ifdef HAVE_UIMAGE_COMP_GZIP
183 case IH_COMP_GZIP:
184 return uimage_decompress_gzip(uh, state_h, out, out_size, reader, rctx);
185#endif
186
187 default:
188 return E_UNSUPPORTED_COMPRESSION;
189 }
190
191 return 0;
192}
193
194int uimage_load(struct uimage_header* uh, size_t* out_size,
195 uimage_reader reader, void* rctx)
196{
197 if(reader(uh, sizeof(*uh), rctx) != (ssize_t)sizeof(*uh))
198 return E_READ;
199
200 int ret = uimage_check_header(uh);
201 if(ret)
202 return ret;
203
204 int state_h = uimage_alloc_state(uh);
205 if(state_h < 0)
206 return E_OUT_OF_MEMORY;
207
208 *out_size = 0;
209 int out_h = core_alloc_maximum(out_size, &buflib_ops_locked);
210 if(out_h <= 0) {
211 ret = E_OUT_OF_MEMORY;
212 goto err;
213 }
214
215 ret = uimage_decompress(uh, state_h, core_get_data(out_h), out_size,
216 reader, rctx);
217 if(ret)
218 goto err;
219
220 core_shrink(out_h, NULL, *out_size);
221 ret = 0;
222
223 err:
224 core_free(state_h);
225 if(out_h > 0) {
226 if(ret == 0)
227 ret = out_h;
228 else
229 core_free(out_h);
230 }
231
232 return ret;
233}
234
/* uimage_reader backed by a plain file descriptor; the fd travels
 * packed into the opaque context pointer. Returns read()'s result
 * unchanged (bytes read, 0 on EOF, or -1 on error). */
ssize_t uimage_fd_reader(void* buf, size_t size, void* ctx)
{
    intptr_t fd = (intptr_t)ctx;
    return read((int)fd, buf, size);
}
240
/* Linux's self-extracting kernels are broken on MIPS. The decompressor stub
 * doesn't flush caches after extracting the kernel code which can cause the
 * boot to fail horribly. This has been true since at least 2009 and at the
 * time of writing (2022) it's *still* broken.
 *
 * The FiiO M3K and Shanling Q1 both have broken kernels of this type, so we
 * work around this by replacing the direct call to the kernel entry point with
 * a thunk that adds the necessary cache flush.
 */

/* Scan the start of a decompressed MIPS kernel for the bootstrap jump
 * sequence described below.
 *
 * code_start: in: start of the kernel code; out: on success, the first
 *             word after the move sequence -- the spot where a 4-word
 *             call to the cache-flush thunk can be patched in
 * code_size:  number of bytes available at *code_start
 *
 * Returns the decoded kernel entry point address, or 0 if the expected
 * instruction sequence could not be found or decoded. */
uint32_t mips_linux_stub_get_entry(void** code_start, size_t code_size)
{
    /* The jump to the kernel entry point looks like this:
     *
     *      move a0, s0
     *      move a1, s1
     *      move a2, s2
     *      move a3, s3
     *      ...
     *      la   k0, KERNEL_ENTRY
     *      jr   k0
     * --- or in kernels since 2021: ---
     *      la   t9, KERNEL_ENTRY
     *      jalr t9
     *
     * We're trying to identify this code and decode the kernel entry
     * point address, and return a suitable address where we can patch
     * in a call to our thunk.
     */

    /* We should only need to scan within the first 128 bytes
     * but do up to 256 just in case. */
    uint32_t* start = *code_start;
    uint32_t* end = start + (MIN(code_size, 256) + 3) / 4;

    /* Scan for the "move aN, sN" sequence; on exit move_instr points
     * at the word *after* the last move. */
    uint32_t* move_instr = start;
    for(move_instr += 4; move_instr < end; ++move_instr) {
        if(move_instr[-4] == 0x02002021 && /* move a0, s0 */
           move_instr[-3] == 0x02202821 && /* move a1, s1 */
           move_instr[-2] == 0x02403021 && /* move a2, s2 */
           move_instr[-1] == 0x02603821)   /* move a3, s3 */
            break;
    }

    if(move_instr == end)
        return 0;

    /* Now search forward for the next jr/jalr instruction.
     * First mask matches "jalr rN" (rd = ra, funct 0x09), the second
     * matches "jr rN" (opcode 0, funct 0x08). jreg gets the rs field,
     * i.e. the register holding the entry address. */
    int jreg = 0;
    uint32_t* jump_instr = move_instr;
    for(; jump_instr != end; ++jump_instr) {
        if((jump_instr[0] & 0xfc1ff83f) == 0xf809 ||
           (jump_instr[0] & 0xfc00003f) == 0x8) {
            /* jalr rN */
            jreg = (jump_instr[0] >> 21) & 0x1f;
            break;
        }
    }

    /* Need room here for 4 instructions. Assume everything between the
     * moves and the jump is safe to overwrite; otherwise, we'll need to
     * take a different approach.
     *
     * Count +1 instruction for the branch delay slot and another +1 because
     * "move_instr" points to the instruction following the last move. */
    if(jump_instr - move_instr + 2 < 4)
        return 0;
    if(!jreg)
        return 0;

    /* Now scan from the end of the move sequence until the jump instruction
     * and try to reconstruct the entry address. We check for lui/ori/addiu. */
    const uint32_t lui_mask = 0xffff0000;
    const uint32_t lui = 0x3c000000 | (jreg << 16);
    const uint32_t ori_mask = 0xffff0000;
    const uint32_t ori = 0x34000000 | (jreg << 21) | (jreg << 16);
    const uint32_t addiu_mask = 0xffff0000;
    const uint32_t addiu = 0x24000000 | (jreg << 21) | (jreg << 16);

    /* Can use any initial value here -- a lui must come first in any
     * valid "la" expansion, which overwrites it entirely. */
    uint32_t jreg_val = 0xdeadbeef;

    for(uint32_t* instr = move_instr; instr != jump_instr; ++instr) {
        if((instr[0] & lui_mask) == lui)
            jreg_val = (instr[0] & 0xffff) << 16;
        else if((instr[0] & ori_mask) == ori)
            jreg_val |= instr[0] & 0xffff;
        else if((instr[0] & addiu_mask) == addiu)
            /* NOTE(review): addiu sign-extends its immediate on real
             * hardware; this adds it zero-extended. OK as long as the
             * "la" expansions seen in practice use lui+addiu with the
             * carry pre-folded into the lui -- confirm if new targets
             * misdecode. */
            jreg_val += instr[0] & 0xffff;
    }

    /* Success! Probably! */
    *code_start = move_instr;
    return jreg_val;
}