* branch 'tty-fixes':
  tty: use the new 'flush_delayed_work()' helper to do ldisc flush
  workqueue: add 'flush_delayed_work()' to run and wait for delayed work
  Make flush_to_ldisc properly handle parallel calls
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ ... @@
 		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long flags;
 	struct tty_ldisc *disc;
-	struct tty_buffer *tbuf, *head;
-	char *char_buf;
-	unsigned char *flag_buf;
 
 	disc = tty_ldisc_ref(tty);
 	if (disc == NULL)	/* !TTY_LDISC */
 		return;
 
 	spin_lock_irqsave(&tty->buf.lock, flags);
-	/* So we know a flush is running */
-	set_bit(TTY_FLUSHING, &tty->flags);
-	head = tty->buf.head;
-	if (head != NULL) {
-		tty->buf.head = NULL;
-		for (;;) {
-			int count = head->commit - head->read;
+
+	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
+		struct tty_buffer *head;
+		while ((head = tty->buf.head) != NULL) {
+			int count;
+			char *char_buf;
+			unsigned char *flag_buf;
+
+			count = head->commit - head->read;
 			if (!count) {
 				if (head->next == NULL)
 					break;
-				tbuf = head;
-				head = head->next;
-				tty_buffer_free(tty, tbuf);
+				tty->buf.head = head->next;
+				tty_buffer_free(tty, head);
 				continue;
 			}
 			/* Ldisc or user is trying to flush the buffers
@@ ... @@
 							flag_buf, count);
 			spin_lock_irqsave(&tty->buf.lock, flags);
 		}
-		/* Restore the queue head */
-		tty->buf.head = head;
+		clear_bit(TTY_FLUSHING, &tty->flags);
 	}
+
 	/* We may have a deferred request to flush the input buffer,
 	   if so pull the chain under the lock and empty the queue */
 	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@@ ... @@
 		clear_bit(TTY_FLUSHPENDING, &tty->flags);
 		wake_up(&tty->read_wait);
 	}
-	clear_bit(TTY_FLUSHING, &tty->flags);
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	tty_ldisc_deref(disc);
@@ ... @@
  */
 void tty_flush_to_ldisc(struct tty_struct *tty)
 {
-	flush_to_ldisc(&tty->buf.work.work);
+	flush_delayed_work(&tty->buf.work);
 }
 
 /**
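The tty_buffer.c change turns TTY_FLUSHING into a try-lock: whichever caller wins test_and_set_bit() drains the buffer list in place, re-reading tty->buf.head under tty->buf.lock on every pass, while a caller that loses the race simply falls through. The old code detached the whole list up front and wrote the leftover chain back afterwards, so two parallel flushers could each grab the list and the later "restore" could clobber buffers queued in between. The guard pattern can be sketched in plain C11 as below; this is a userspace illustration only, and every name in it (flushing, queue_lock, queued_items, drain) is invented, not kernel code.

/*
 * Userspace sketch of the "single flusher" guard used in flush_to_ldisc()
 * above.  Only the test-and-set / clear structure mirrors the patch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag flushing = ATOMIC_FLAG_INIT;		/* plays the role of TTY_FLUSHING */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued_items = 100;				/* stand-in for the tty->buf.head chain */

static void *drain(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&queue_lock);
	if (!atomic_flag_test_and_set(&flushing)) {	/* cf. test_and_set_bit() */
		while (queued_items > 0) {
			int batch = queued_items;	/* re-read state under the lock */

			/*
			 * The lock is dropped while the batch is handed off
			 * (the kernel drops tty->buf.lock around the ldisc
			 * receive call); the flag keeps a second flusher from
			 * touching the queue state in this window.
			 */
			pthread_mutex_unlock(&queue_lock);
			pthread_mutex_lock(&queue_lock);
			queued_items -= batch;
		}
		atomic_flag_clear(&flushing);		/* cf. clear_bit() */
	}
	/* a caller that loses the race just falls through and does nothing */
	pthread_mutex_unlock(&queue_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, drain, NULL);
	pthread_create(&b, NULL, drain, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("items left: %d\n", queued_items);	/* always 0, never negative */
	return 0;
}

The net effect matches the patch: at most one flusher walks the list at a time, the queue head is never detached and restored, and concurrent callers are harmless.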
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ ... @@
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
+extern void flush_delayed_work(struct delayed_work *dwork);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ ... @@
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer(&dwork->timer)) {
+		struct cpu_workqueue_struct *cwq;
+		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		__queue_work(cwq, &dwork->work);
+		put_cpu();
+	}
+	flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
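The helper cancels any pending timer; if the timer was still armed it queues the work directly on the current CPU's keventd queue and then waits for it with flush_work(), so the callback is guaranteed to have finished by the time flush_delayed_work() returns. A caller would typically pair it with schedule_delayed_work(), roughly as in the hypothetical module sketch below; demo_work, demo_fn and the one-second delay are made up for illustration, only the two workqueue calls are the real API.

/*
 * Hypothetical example module: demo_* names and the 1 second delay are
 * invented; only schedule_delayed_work() and the new flush_delayed_work()
 * are the APIs being demonstrated.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo: delayed work ran\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_fn);
	schedule_delayed_work(&demo_work, HZ);	/* run demo_fn roughly 1s from now */
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * If demo_fn has not run yet, run it now and wait for it to finish,
	 * so it cannot fire after the module is gone.
	 */
	flush_delayed_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Unlike cancel_delayed_work_sync(), flush_delayed_work() runs a still-pending callback immediately rather than discarding it, which is what the tty change above relies on: queued input is actually pushed to the line discipline before tty_flush_to_ldisc() returns.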