at v3.15-rc1
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>


#define MIN_TTYB_SIZE	256
#define TTYB_ALIGN_MASK	255

/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT	65536

/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */

#define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)


/**
 * tty_buffer_lock_exclusive - gain exclusive access to buffer
 * tty_buffer_unlock_exclusive - release exclusive access
 *
 * @port: tty_port owning the flip buffer
 *
 * Guarantees safe use of the line discipline's receive_buf() method by
 * excluding the buffer work and any pending flush from using the flip
 * buffer. Data can continue to be added concurrently to the flip buffer
 * from the driver side.
 *
 * On release, the buffer work is restarted if there is data in the
 * flip buffer.
 */

void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}

void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	int restart;

	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}

/**
 * tty_buffer_space_avail - return unused buffer space
 * @port: tty_port owning the flip buffer
 *
 * Returns the # of bytes which can be written by the driver without
 * reaching the buffer limit.
 *
 * Note: this does not guarantee that memory is available to write
 * the returned # of bytes (use tty_prepare_flip_string_xxx() to
 * pre-allocate if memory guarantee is required).
 */

int tty_buffer_space_avail(struct tty_port *port)
{
	int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
	return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
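/*
 * Example (illustrative sketch, not part of tty_buffer.c): a driver's
 * receive path might consult tty_buffer_space_avail() to apply flow
 * control before draining its hardware FIFO.  Every example_* name
 * below is a hypothetical placeholder, not a real kernel symbol.
 */
static int example_rx_throttled(struct tty_port *port,
				const unsigned char *fifo, int avail)
{
	/* Never queue more than the flip buffers will currently accept. */
	int count = min(avail, tty_buffer_space_avail(port));

	if (count) {
		count = tty_insert_flip_string(port, fifo, count);
		tty_flip_buffer_push(port);
	}
	return count;	/* bytes actually consumed from the FIFO */
}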
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->flags = 0;
}

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @port: tty_port to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use.
 */

void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;

	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		if (p->size > 0)
			kfree(p);
	}
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	atomic_set(&buf->mem_used, 0);
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @port: tty_port that will own the buffer
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * We round our buffers off in 256 character chunks to get better
 * allocation behaviour.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue.
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ? */
	if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	atomic_add(size, &port->buf.mem_used);
	return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @port: tty_port owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy.
 */

static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
	struct tty_bufhead *buf = &port->buf;

	/* Dumb strategy for now - should keep some stats */
	WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

	if (b->size > MIN_TTYB_SIZE)
		kfree(b);
	else if (b->size > 0)
		llist_add(&b->free, &buf->free);
}

/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */

void tty_buffer_flush(struct tty_struct *tty)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	while ((next = buf->head->next) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	buf->head->read = buf->head->commit;
	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}

/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @port: tty port
 * @size: size desired
 * @flags: buffer flags if new buffer allocated (default = 0)
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Will change over to a new buffer if the current buffer is encoded as
 * TTY_NORMAL (so has no flags buffer) and the new buffer requires
 * a flags buffer.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
				     int flags)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left, change;

	b = buf->tail;
	if (b->flags & TTYB_NORMAL)
		left = 2 * b->size - b->used;
	else
		left = b->size - b->used;

	change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
	if (change || left < size) {
		/* This is the slow path - looking for new buffers to use */
		if ((n = tty_buffer_alloc(port, size)) != NULL) {
			n->flags = flags;
			buf->tail = n;
			b->commit = b->used;
			smp_mb();
			b->next = n;
		} else if (change)
			size = 0;
		else
			size = left;
	}
	return size;
}

int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
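/*
 * Example (hypothetical sketch, not part of this file): drivers that
 * queue characters one at a time often call tty_buffer_request_room()
 * once for the whole burst so the per-character inserts stay on the
 * fast path.  example_rx_chars() is illustrative only.
 */
static void example_rx_chars(struct tty_port *port, const unsigned char *data,
			     size_t n)
{
	size_t i;

	tty_buffer_request_room(port, n);
	for (i = 0; i < n; i++)
		tty_insert_flip_char(port, data[i], TTY_NORMAL);
	tty_flip_buffer_push(port);
}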
/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 */

int tty_insert_flip_string_fixed_flag(struct tty_port *port,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
		int space = __tty_buffer_request_room(port, goal, flags);
		struct tty_buffer *tb = port->buf.tail;
		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 */

int tty_insert_flip_string_flags(struct tty_port *port,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(port, goal);
		struct tty_buffer *tb = port->buf.tail;
		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
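/*
 * Example (hypothetical sketch): when the hardware reports per-character
 * error status, a driver can build a parallel flag array and queue both
 * with tty_insert_flip_string_flags().  The 64-byte chunking and the
 * example_* name are arbitrary choices for the sketch.
 */
static void example_rx_with_errors(struct tty_port *port,
				   const unsigned char *data,
				   const unsigned char *err, size_t n)
{
	char flags[64];
	size_t i, chunk;

	while (n) {
		chunk = min_t(size_t, n, sizeof(flags));
		for (i = 0; i < chunk; i++)
			flags[i] = err[i] ? TTY_PARITY : TTY_NORMAL;
		tty_insert_flip_string_flags(port, data, flags, chunk);
		data += chunk;
		err += chunk;
		n -= chunk;
	}
	tty_flip_buffer_push(port);
}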
/**
 * tty_schedule_flip - push characters to ldisc
 * @port: tty port to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 */

void tty_schedule_flip(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	buf->tail->commit = buf->tail->used;
	schedule_work(&buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);

/**
 * tty_prepare_flip_string - make room for characters
 * @port: tty port
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 */

int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
		size_t size)
{
	int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);
	if (likely(space)) {
		struct tty_buffer *tb = port->buf.tail;
		*chars = char_buf_ptr(tb, tb->used);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
		tb->used += space;
	}
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
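/*
 * Example (hypothetical sketch): a driver that completes receive
 * transfers into its own buffer can reserve flip-buffer space with
 * tty_prepare_flip_string() and block-copy straight into it.
 * example_rx_block() is illustrative only.
 */
static void example_rx_block(struct tty_port *port, const void *src, size_t len)
{
	unsigned char *dst;
	int space = tty_prepare_flip_string(port, &dst, len);

	if (space > 0) {
		/* Only 'space' bytes were reserved; any excess is dropped. */
		memcpy(dst, src, space);
		tty_flip_buffer_push(port);
	}
}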

static int
receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
{
	struct tty_ldisc *disc = tty->ldisc;
	unsigned char *p = char_buf_ptr(head, head->read);
	char *f = NULL;

	if (~head->flags & TTYB_NORMAL)
		f = flag_buf_ptr(head, head->read);

	if (disc->ops->receive_buf2)
		count = disc->ops->receive_buf2(tty, p, f, count);
	else {
		count = min_t(int, count, tty->receive_room);
		if (count)
			disc->ops->receive_buf(tty, p, f, count);
	}
	head->read += count;
	return count;
}

/**
 * flush_to_ldisc
 * @work: buffer work of the tty port (&port->buf.work) passed from the
 *	  workqueue
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * The receive_buf method is single threaded for each tty instance.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */

static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_port *port = container_of(work, struct tty_port, buf.work);
	struct tty_bufhead *buf = &port->buf;
	struct tty_struct *tty;
	struct tty_ldisc *disc;

	tty = port->itty;
	if (tty == NULL)
		return;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)
		return;

	mutex_lock(&buf->lock);

	while (1) {
		struct tty_buffer *head = buf->head;
		int count;

		/* Ldisc or user is trying to gain exclusive access */
		if (atomic_read(&buf->priority))
			break;

		count = head->commit - head->read;
		if (!count) {
			if (head->next == NULL)
				break;
			buf->head = head->next;
			tty_buffer_free(port, head);
			continue;
		}

		count = receive_buf(tty, head, count);
		if (!count)
			break;
	}

	mutex_unlock(&buf->lock);

	tty_ldisc_deref(disc);
}

/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	flush_work(&tty->port->buf.work);
}

/**
 * tty_flip_buffer_push - push terminal flip buffers
 * @port: tty port to push
 *
 * Queue a push of the terminal flip buffers to the line discipline.
 * Can be called from IRQ/atomic context.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 */

void tty_flip_buffer_push(struct tty_port *port)
{
	tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

/**
 * tty_buffer_init - prepare a tty buffer structure
 * @port: tty port to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 */

void tty_buffer_init(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	mutex_init(&buf->lock);
	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;
	init_llist_head(&buf->free);
	atomic_set(&buf->mem_used, 0);
	atomic_set(&buf->priority, 0);
	INIT_WORK(&buf->work, flush_to_ldisc);
	buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}

/**
 * tty_buffer_set_limit - change the tty buffer memory limit
 * @port: tty port to change
 * @limit: new memory limit in bytes
 *
 * Change the tty buffer memory limit.
 * Must be called before the other tty buffer functions are used.
 */

int tty_buffer_set_limit(struct tty_port *port, int limit)
{
	if (limit < MIN_TTYB_SIZE)
		return -EINVAL;
	port->buf.mem_limit = limit;
	return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
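/*
 * Example (hypothetical sketch): a driver expecting sustained, bursty
 * input can raise the default 64 kB memory limit right after its
 * tty_port is initialised (tty_port_init() calls tty_buffer_init(),
 * which installs TTYB_DEFAULT_MEM_LIMIT).  example_port_setup() is
 * illustrative only.
 */
static void example_port_setup(struct tty_port *port)
{
	tty_port_init(port);
	/* Allow up to 1 MiB of queued receive data on this port. */
	tty_buffer_set_limit(port, 1024 * 1024);
}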