// Serenity Operating System — kernel scheduler
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <AK/QuickSort.h>
28#include <AK/TemporaryChange.h>
29#include <Kernel/Devices/PIT.h>
30#include <Kernel/FileSystem/FileDescription.h>
31#include <Kernel/Net/Socket.h>
32#include <Kernel/Process.h>
33#include <Kernel/Profiling.h>
34#include <Kernel/RTC.h>
35#include <Kernel/Scheduler.h>
36#include <Kernel/TimerQueue.h>
37
38//#define LOG_EVERY_CONTEXT_SWITCH
39//#define SCHEDULER_DEBUG
40//#define SCHEDULER_RUNNABLE_DEBUG
41
42namespace Kernel {
43
44SchedulerData* g_scheduler_data;
45
46void Scheduler::init_thread(Thread& thread)
47{
48 g_scheduler_data->m_nonrunnable_threads.append(thread);
49}
50
51void Scheduler::update_state_for_thread(Thread& thread)
52{
53 ASSERT_INTERRUPTS_DISABLED();
54 auto& list = g_scheduler_data->thread_list_for_state(thread.state());
55
56 if (list.contains(thread))
57 return;
58
59 list.append(thread);
60}
61
62static u32 time_slice_for(const Thread& thread)
63{
64 // One time slice unit == 1ms
65 if (&thread == g_colonel)
66 return 1;
67 return 10;
68}
69
// The finalizer thread and its coordination state; the thread itself is
// created outside this file.
Thread* g_finalizer;
// The kernel idle thread ("colonel"); pick_next() falls back to it when
// nothing else is runnable.
Thread* g_colonel;
// Wait queue for the finalizer thread (allocated in Scheduler::initialize()).
WaitQueue* g_finalizer_wait_queue;
// Set when there is finalization work pending; cleared in initialize().
bool g_finalizer_has_work;
// Process that owns the colonel thread (see Scheduler::colonel()).
static Process* s_colonel_process;
// Timer ticks since boot; incremented once per Scheduler::timer_tick().
u64 g_uptime;

// GDT selector + TSS used to redirect task switches, so a thread that is
// about to have its own TSS modified can be switched away from safely
// (see initialize_redirection() and prepare_for_iret_to_new_process()).
struct TaskRedirectionData {
    u16 selector;
    TSS32 tss;
};
static TaskRedirectionData s_redirection;
// True while a scheduling pass is in progress (set via TemporaryChange in pick_next()).
static bool s_active;
83
// Returns true while a scheduling pass (pick_next()) is in progress.
bool Scheduler::is_active()
{
    return s_active;
}
88
// Blocks the current thread until `joinee` exits. The joinee's exit value
// will be written through the `joinee_exit_value` reference (presumably by
// whoever tears the joinee down — not visible in this file; TODO confirm).
Thread::JoinBlocker::JoinBlocker(Thread& joinee, void*& joinee_exit_value)
    : m_joinee(joinee)
    , m_joinee_exit_value(joinee_exit_value)
{
    // A thread may only have one joiner at a time.
    ASSERT(m_joinee.m_joiner == nullptr);
    // Link both sides: the joinee records who is waiting on it, and the
    // current thread records whom it is joining.
    m_joinee.m_joiner = Thread::current;
    Thread::current->m_joinee = &joinee;
}
97
98bool Thread::JoinBlocker::should_unblock(Thread& joiner, time_t, long)
99{
100 return !joiner.m_joinee;
101}
102
// Base class for blockers that wait on a file description
// (accept/connect/read/write).
Thread::FileDescriptionBlocker::FileDescriptionBlocker(const FileDescription& description)
    : m_blocked_description(description)
{
}
107
// Returns the file description this blocker is waiting on.
const FileDescription& Thread::FileDescriptionBlocker::blocked_description() const
{
    return m_blocked_description;
}
112
// Blocks until the socket backing `description` has a connection to accept.
Thread::AcceptBlocker::AcceptBlocker(const FileDescription& description)
    : FileDescriptionBlocker(description)
{
}
117
118bool Thread::AcceptBlocker::should_unblock(Thread&, time_t, long)
119{
120 auto& socket = *blocked_description().socket();
121 return socket.can_accept();
122}
123
// Blocks until the socket backing `description` finishes connection setup.
Thread::ConnectBlocker::ConnectBlocker(const FileDescription& description)
    : FileDescriptionBlocker(description)
{
}
128
129bool Thread::ConnectBlocker::should_unblock(Thread&, time_t, long)
130{
131 auto& socket = *blocked_description().socket();
132 return socket.setup_state() == Socket::SetupState::Completed;
133}
134
135Thread::WriteBlocker::WriteBlocker(const FileDescription& description)
136 : FileDescriptionBlocker(description)
137{
138 if (description.is_socket()) {
139 auto& socket = *description.socket();
140 if (socket.has_send_timeout()) {
141 timeval deadline = kgettimeofday();
142 deadline.tv_sec += socket.send_timeout().tv_sec;
143 deadline.tv_usec += socket.send_timeout().tv_usec;
144 deadline.tv_sec += (socket.send_timeout().tv_usec / 1000000) * 1;
145 deadline.tv_usec %= 1000000;
146 m_deadline = deadline;
147 }
148 }
149}
150
151bool Thread::WriteBlocker::should_unblock(Thread&, time_t now_sec, long now_usec)
152{
153 if (m_deadline.has_value()) {
154 bool timed_out = now_sec > m_deadline.value().tv_sec || (now_sec == m_deadline.value().tv_sec && now_usec >= m_deadline.value().tv_usec);
155 return timed_out || blocked_description().can_write();
156 }
157 return blocked_description().can_write();
158}
159
160Thread::ReadBlocker::ReadBlocker(const FileDescription& description)
161 : FileDescriptionBlocker(description)
162{
163 if (description.is_socket()) {
164 auto& socket = *description.socket();
165 if (socket.has_receive_timeout()) {
166 timeval deadline = kgettimeofday();
167 deadline.tv_sec += socket.receive_timeout().tv_sec;
168 deadline.tv_usec += socket.receive_timeout().tv_usec;
169 deadline.tv_sec += (socket.receive_timeout().tv_usec / 1000000) * 1;
170 deadline.tv_usec %= 1000000;
171 m_deadline = deadline;
172 }
173 }
174}
175
176bool Thread::ReadBlocker::should_unblock(Thread&, time_t now_sec, long now_usec)
177{
178 if (m_deadline.has_value()) {
179 bool timed_out = now_sec > m_deadline.value().tv_sec || (now_sec == m_deadline.value().tv_sec && now_usec >= m_deadline.value().tv_usec);
180 return timed_out || blocked_description().can_read();
181 }
182 return blocked_description().can_read();
183}
184
// Blocks until an arbitrary caller-supplied condition returns true.
// `state_string` presumably names the blocked state for diagnostics — not
// used in this file; TODO confirm against Thread::state_string().
Thread::ConditionBlocker::ConditionBlocker(const char* state_string, Function<bool()>&& condition)
    : m_block_until_condition(move(condition))
    , m_state_string(state_string)
{
    // A null condition would crash in should_unblock().
    ASSERT(m_block_until_condition);
}
191
// Resume as soon as the caller-supplied condition evaluates to true.
bool Thread::ConditionBlocker::should_unblock(Thread&, time_t, long)
{
    return m_block_until_condition();
}
196
// Blocks until the global tick counter (g_uptime) reaches `wakeup_time`.
Thread::SleepBlocker::SleepBlocker(u64 wakeup_time)
    : m_wakeup_time(wakeup_time)
{
}
201
202bool Thread::SleepBlocker::should_unblock(Thread&, time_t, long)
203{
204 return m_wakeup_time <= g_uptime;
205}
206
// Blocks until any of the watched fds becomes ready, or (when
// select_has_timeout is set) until the absolute deadline `tv` passes.
// Presumably constructed by the select syscall — TODO confirm caller.
Thread::SelectBlocker::SelectBlocker(const timeval& tv, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds)
    : m_select_timeout(tv)
    , m_select_has_timeout(select_has_timeout)
    , m_select_read_fds(read_fds)
    , m_select_write_fds(write_fds)
    , m_select_exceptional_fds(except_fds)
{
}
215
// Resume when the select timeout expires or any watched fd becomes ready
// for reading/writing.
// NOTE(review): m_select_exceptional_fds is never consulted here, so
// exceptional conditions cannot wake the blocker — verify that is intended.
// NOTE(review): fds index process.m_fds without a bounds check — presumably
// validated by the caller before blocking; confirm.
bool Thread::SelectBlocker::should_unblock(Thread& thread, time_t now_sec, long now_usec)
{
    if (m_select_has_timeout) {
        // The timeout is an absolute deadline; compare (sec, usec) lexicographically.
        if (now_sec > m_select_timeout.tv_sec || (now_sec == m_select_timeout.tv_sec && now_usec >= m_select_timeout.tv_usec))
            return true;
    }

    auto& process = thread.process();
    for (int fd : m_select_read_fds) {
        // Skip fds that have been closed since the select began.
        if (!process.m_fds[fd])
            continue;
        if (process.m_fds[fd].description->can_read())
            return true;
    }
    for (int fd : m_select_write_fds) {
        if (!process.m_fds[fd])
            continue;
        if (process.m_fds[fd].description->can_write())
            return true;
    }

    return false;
}
239
// Blocks a thread waiting on a child process (wait/waitpid semantics).
// `waitee_pid` is a reference into the caller: -1 means "any child", and it
// is overwritten with the pid of the child that satisfied the wait.
Thread::WaitBlocker::WaitBlocker(int wait_options, pid_t& waitee_pid)
    : m_wait_options(wait_options)
    , m_waitee_pid(waitee_pid)
{
}
245
// Resume once a child matching the wait request has exited or stopped.
// On success, m_waitee_pid is updated with the pid of that child.
bool Thread::WaitBlocker::should_unblock(Thread& thread, time_t, long)
{
    bool should_unblock = false;
    if (m_waitee_pid != -1) {
        // Waiting on a specific pid: if that process no longer exists,
        // unblock so the wait syscall can notice (presumably returning an
        // error such as ECHILD — TODO confirm in the syscall).
        auto* peer = Process::from_pid(m_waitee_pid);
        if (!peer)
            return true;
    }
    thread.process().for_each_child([&](Process& child) {
        // With a specific waitee pid, ignore all other children.
        if (m_waitee_pid != -1 && m_waitee_pid != child.pid())
            return IterationDecision::Continue;

        bool child_exited = child.is_dead();
        bool child_stopped = child.thread_count() && child.any_thread().state() == Thread::State::Stopped;

        // WEXITED / WSTOPPED select which child state changes we wait for.
        bool wait_finished = ((m_wait_options & WEXITED) && child_exited)
            || ((m_wait_options & WSTOPPED) && child_stopped);

        if (!wait_finished)
            return IterationDecision::Continue;

        // Record which child satisfied the wait and stop searching.
        m_waitee_pid = child.pid();
        should_unblock = true;
        return IterationDecision::Break;
    });
    return should_unblock;
}
273
// Blocks with no wake-up condition of its own (see should_unblock());
// `reason` records why the thread was parked.
Thread::SemiPermanentBlocker::SemiPermanentBlocker(Reason reason)
    : m_reason(reason)
{
}
278
// Never self-unblocks; the thread stays blocked until explicitly woken.
bool Thread::SemiPermanentBlocker::should_unblock(Thread&, time_t, long)
{
    // someone else has to unblock us
    return false;
}
284
// Called by the scheduler on threads that are blocked for some reason.
// Make a decision as to whether to unblock them or not.
void Thread::consider_unblock(time_t now_sec, long now_usec)
{
    switch (state()) {
    case Thread::Invalid:
    case Thread::Runnable:
    case Thread::Running:
    case Thread::Dead:
    case Thread::Stopped:
    case Thread::Queued:
    case Thread::Dying:
        /* don't know, don't care */
        return;
    case Thread::Blocked:
        // Ask the blocker whether its wait condition is now satisfied.
        ASSERT(m_blocker != nullptr);
        if (m_blocker->should_unblock(*this, now_sec, now_usec))
            unblock();
        return;
    case Thread::Skip1SchedulerPass:
        // Countdown states: skip this pass, become runnable on the next one.
        set_state(Thread::Skip0SchedulerPasses);
        return;
    case Thread::Skip0SchedulerPasses:
        set_state(Thread::Runnable);
        return;
    }
}
312
// Core scheduling pass: unblocks threads whose wait conditions are met,
// reaps dead unparented processes, delivers alarms and pending signals,
// then picks the runnable thread with the highest effective priority and
// context-switches to it. Returns what context_switch() returns (false when
// no switch was needed).
bool Scheduler::pick_next()
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!s_active);

    // Mark the scheduler active for the duration of this pass.
    TemporaryChange<bool> change(s_active, true);

    ASSERT(s_active);

    if (!Thread::current) {
        // XXX: The first ever context_switch() goes to the idle process.
        // This to setup a reliable place we can return to.
        return context_switch(*g_colonel);
    }

    struct timeval now;
    kgettimeofday(now);

    auto now_sec = now.tv_sec;
    auto now_usec = now.tv_usec;

    // Check and unblock threads whose wait conditions have been met.
    Scheduler::for_each_nonrunnable([&](Thread& thread) {
        thread.consider_unblock(now_sec, now_usec);
        return IterationDecision::Continue;
    });

    Process::for_each([&](Process& process) {
        if (process.is_dead()) {
            // Reap dead processes whose parent is gone (nobody will wait() on them).
            if (Process::current->pid() != process.pid() && (!process.ppid() || !Process::from_pid(process.ppid()))) {
                auto name = process.name();
                auto pid = process.pid();
                auto exit_status = Process::reap(process);
                dbgprintf("reaped unparented process %s(%u), exit status: %u\n", name.characters(), pid, exit_status);
            }
            return IterationDecision::Continue;
        }
        // Deliver SIGALRM to processes whose alarm deadline has expired.
        if (process.m_alarm_deadline && g_uptime > process.m_alarm_deadline) {
            process.m_alarm_deadline = 0;
            process.send_signal(SIGALRM, nullptr);
        }
        return IterationDecision::Continue;
    });

    // Dispatch any pending signals.
    Thread::for_each_living([](Thread& thread) -> IterationDecision {
        if (!thread.has_unmasked_pending_signals())
            return IterationDecision::Continue;
        // FIXME: It would be nice if the Scheduler didn't have to worry about who is "current"
        // For now, avoid dispatching signals to "current" and do it in a scheduling pass
        // while some other process is interrupted. Otherwise a mess will be made.
        if (&thread == Thread::current)
            return IterationDecision::Continue;
        // We know how to interrupt blocked processes, but if they are just executing
        // at some random point in the kernel, let them continue.
        // Before returning to userspace from a syscall, we will block a thread if it has any
        // pending unmasked signals, allowing it to be dispatched then.
        if (thread.in_kernel() && !thread.is_blocked() && !thread.is_stopped())
            return IterationDecision::Continue;
        // NOTE: dispatch_one_pending_signal() may unblock the process.
        bool was_blocked = thread.is_blocked();
        if (thread.dispatch_one_pending_signal() == ShouldUnblockThread::No)
            return IterationDecision::Continue;
        if (was_blocked) {
            dbgprintf("Unblock %s(%u) due to signal\n", thread.process().name().characters(), thread.pid());
            ASSERT(thread.m_blocker != nullptr);
            thread.m_blocker->set_interrupted_by_signal();
            thread.unblock();
        }
        return IterationDecision::Continue;
    });

#ifdef SCHEDULER_RUNNABLE_DEBUG
    dbgprintf("Non-runnables:\n");
    Scheduler::for_each_nonrunnable([](Thread& thread) -> IterationDecision {
        dbgprintf(" %-12s %s(%u:%u) @ %w:%x\n", thread.state_string(), thread.name().characters(), thread.pid(), thread.tid(), thread.tss().cs, thread.tss().eip);
        return IterationDecision::Continue;
    });

    dbgprintf("Runnables:\n");
    Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
        dbgprintf(" %3u/%2u %-12s %s(%u:%u) @ %w:%x\n", thread.effective_priority(), thread.priority(), thread.state_string(), thread.name().characters(), thread.pid(), thread.tid(), thread.tss().cs, thread.tss().eip);
        return IterationDecision::Continue;
    });
#endif

    // Collect all runnable threads and sort them by effective priority,
    // highest first.
    Vector<Thread*, 128> sorted_runnables;
    for_each_runnable([&sorted_runnables](auto& thread) {
        sorted_runnables.append(&thread);
        return IterationDecision::Continue;
    });
    quick_sort(sorted_runnables.begin(), sorted_runnables.end(), [](auto& a, auto& b) { return a->effective_priority() >= b->effective_priority(); });

    Thread* thread_to_schedule = nullptr;

    // Schedule the first eligible thread; every passed-over thread gains
    // m_extra_priority (presumably folded into effective_priority() so
    // lower-priority threads age upward and avoid starvation — the
    // definition is not in this file; TODO confirm).
    for (auto* thread : sorted_runnables) {
        if (thread->process().is_being_inspected())
            continue;

        ASSERT(thread->state() == Thread::Runnable || thread->state() == Thread::Running);

        if (!thread_to_schedule) {
            thread->m_extra_priority = 0;
            thread_to_schedule = thread;
        } else {
            thread->m_extra_priority++;
        }
    }

    // Nothing runnable: fall back to the idle thread.
    if (!thread_to_schedule)
        thread_to_schedule = g_colonel;

#ifdef SCHEDULER_DEBUG
    dbgprintf("switch to %s(%u:%u) @ %w:%x\n",
        thread_to_schedule->name().characters(),
        thread_to_schedule->pid(),
        thread_to_schedule->tid(),
        thread_to_schedule->tss().cs,
        thread_to_schedule->tss().eip);
#endif

    return context_switch(*thread_to_schedule);
}
436
// Donate the remainder of the current thread's time slice to `beneficiary`
// (e.g. to let a lock holder make progress). Falls back to a plain yield()
// when the beneficiary is not runnable or there is nothing worth donating.
bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
{
    InterruptDisabler disabler;
    // The beneficiary may have died since the caller looked it up.
    if (!Thread::is_thread(beneficiary))
        return false;

    (void)reason; // only used by the SCHEDULER_DEBUG build below
    unsigned ticks_left = Thread::current->ticks_left();
    if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
        return yield();

    // Donate all remaining ticks but one, capped at a full time slice.
    unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
#ifdef SCHEDULER_DEBUG
    dbgprintf("%s(%u:%u) donating %u ticks to %s(%u:%u), reason=%s\n", Process::current->name().characters(), Process::current->pid(), Thread::current->tid(), ticks_to_donate, beneficiary->process().name().characters(), beneficiary->pid(), beneficiary->tid(), reason);
#endif
    // NOTE: the donated tick count must be set after context_switch(),
    // which resets the beneficiary's ticks to a full slice.
    context_switch(*beneficiary);
    beneficiary->set_ticks_left(ticks_to_donate);
    switch_now();
    return false;
}
457
458bool Scheduler::yield()
459{
460 InterruptDisabler disabler;
461 ASSERT(Thread::current);
462 if (!pick_next())
463 return false;
464 switch_now();
465 return true;
466}
467
468void Scheduler::pick_next_and_switch_now()
469{
470 bool someone_wants_to_run = pick_next();
471 ASSERT(someone_wants_to_run);
472 switch_now();
473}
474
// Perform the hardware task switch to the current thread via ljmp through
// its TSS selector. The descriptor type is reset to 9 (available 32-bit
// TSS) first, because jumping to a TSS still marked busy would fault.
void Scheduler::switch_now()
{
    Descriptor& descriptor = get_gdt_entry(Thread::current->selector());
    descriptor.type = 9;
    asm("sti\n"
        "ljmp *(%%eax)\n" ::"a"(&Thread::current->far_ptr()));
}
482
// Switch bookkeeping from the current thread to `thread`: save the outgoing
// FPU state, update Thread/Process::current, lazily allocate a TSS GDT
// entry for the incoming thread, and mark its TSS busy. Returns false when
// `thread` is already current (no switch needed), true otherwise. The
// actual hardware switch happens elsewhere (switch_now() / timer_tick()).
bool Scheduler::context_switch(Thread& thread)
{
    // Grant the incoming thread a fresh time slice.
    thread.set_ticks_left(time_slice_for(thread));
    thread.did_schedule();

    if (Thread::current == &thread)
        return false;

    if (Thread::current) {
        // If the last process hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (Thread::current->state() == Thread::Running)
            Thread::current->set_state(Thread::Runnable);

        // Save the outgoing thread's FPU/SSE state.
        asm volatile("fxsave %0"
                     : "=m"(Thread::current->fpu_state()));

#ifdef LOG_EVERY_CONTEXT_SWITCH
        dbgprintf("Scheduler: %s(%u:%u) -> %s(%u:%u) [%u] %w:%x\n",
            Process::current->name().characters(), Process::current->pid(), Thread::current->tid(),
            thread.process().name().characters(), thread.process().pid(), thread.tid(),
            thread.priority(),
            thread.tss().cs, thread.tss().eip);
#endif
    }

    Thread::current = &thread;
    Process::current = &thread.process();

    thread.set_state(Thread::Running);

    // Restore the incoming thread's FPU/SSE state.
    asm volatile("fxrstor %0" ::"m"(Thread::current->fpu_state()));

    // Lazily allocate and initialize a GDT entry for this thread's TSS.
    if (!thread.selector()) {
        thread.set_selector(gdt_alloc_entry());
        auto& descriptor = get_gdt_entry(thread.selector());
        descriptor.set_base(&thread.tss());
        descriptor.set_limit(sizeof(TSS32));
        descriptor.dpl = 0;
        descriptor.segment_present = 1;
        descriptor.granularity = 0;
        descriptor.zero = 0;
        descriptor.operation_size = 1;
        descriptor.descriptor_type = 0; // system segment (TSS)
    }

    // Point the thread-specific-data segment at this thread's data, if any.
    if (!thread.thread_specific_data().is_null()) {
        auto& descriptor = thread_specific_descriptor();
        descriptor.set_base(thread.thread_specific_data().as_ptr());
        descriptor.set_limit(sizeof(ThreadSpecificData*));
    }

    auto& descriptor = get_gdt_entry(thread.selector());
    descriptor.type = 11; // Busy TSS
    return true;
}
539
// Set up the GDT entry for the redirection TSS (s_redirection), used by
// prepare_for_iret_to_new_process() / prepare_to_modify_tss().
static void initialize_redirection()
{
    auto& descriptor = get_gdt_entry(s_redirection.selector);
    descriptor.set_base(&s_redirection.tss);
    descriptor.set_limit(sizeof(TSS32));
    descriptor.dpl = 0;
    descriptor.segment_present = 1;
    descriptor.granularity = 0;
    descriptor.zero = 0;
    descriptor.operation_size = 1;
    descriptor.descriptor_type = 0; // system segment (TSS)
    descriptor.type = 9;            // available 32-bit TSS
    flush_gdt();
}
554
// Load the task register with the redirection TSS, whose backlink points at
// the current thread's TSS. Resetting type to 9 clears the busy bit so the
// selector can be loaded again. (The NT flag set in timer_tick() makes the
// subsequent iret switch through this backlink.)
void Scheduler::prepare_for_iret_to_new_process()
{
    auto& descriptor = get_gdt_entry(s_redirection.selector);
    descriptor.type = 9;
    s_redirection.tss.backlink = Thread::current->selector();
    load_task_register(s_redirection.selector);
}
562
// Redirect the task register away from `thread`'s own TSS before the TSS is
// modified (only needed when `thread` is the one currently running).
void Scheduler::prepare_to_modify_tss(Thread& thread)
{
    // This ensures that a currently running process modifying its own TSS
    // in order to yield() and end up somewhere else doesn't just end up
    // right after the yield().
    if (Thread::current == &thread)
        load_task_register(s_redirection.selector);
}
571
// Returns the process that owns the kernel idle ("colonel") thread.
Process* Scheduler::colonel()
{
    return s_colonel_process;
}
576
// One-time scheduler setup: allocate global data structures, install the
// redirection TSS, and spawn the colonel (idle) process.
void Scheduler::initialize()
{
    g_scheduler_data = new SchedulerData;
    g_finalizer_wait_queue = new WaitQueue;
    g_finalizer_has_work = false;
    s_redirection.selector = gdt_alloc_entry();
    initialize_redirection();
    s_colonel_process = Process::create_kernel_process(g_colonel, "colonel", nullptr);
    // Idle at the lowest priority so pick_next() prefers anything else.
    g_colonel->set_priority(THREAD_PRIORITY_MIN);
    load_task_register(s_redirection.selector);
}
588
// Invoked on every timer interrupt: advances uptime, refreshes the kernel
// info page timestamp, records profiling samples, fires expired timers, and
// preempts the current thread when its time slice runs out.
void Scheduler::timer_tick(RegisterState& regs)
{
    // Ticks that arrive before scheduling has started are ignored.
    if (!Thread::current)
        return;

    ++g_uptime;

    timeval tv;
    tv.tv_sec = RTC::boot_time() + PIT::the().seconds_since_boot();
    tv.tv_usec = PIT::the().ticks_this_second() * 1000;
    Process::update_info_page_timestamp(tv);

    // Sample the current thread's stack for the profiler, if enabled.
    if (Process::current->is_profiling()) {
        SmapDisabler disabler;
        auto backtrace = Thread::current->raw_backtrace(regs.ebp);
        auto& sample = Profiling::next_sample_slot();
        sample.pid = Process::current->pid();
        sample.tid = Thread::current->tid();
        sample.timestamp = g_uptime;
        for (size_t i = 0; i < min((size_t)backtrace.size(), Profiling::max_stack_frame_count); ++i) {
            sample.frames[i] = backtrace[i];
        }
    }

    TimerQueue::the().fire();

    // tick() presumably returns true while the thread still has slice time
    // remaining — TODO confirm; if so, no preemption happens this tick.
    if (Thread::current->tick())
        return;

    auto& outgoing_tss = Thread::current->tss();

    if (!pick_next())
        return;

    // We got here via an interrupt, not a task switch, so manually copy the
    // interrupted register state into the outgoing thread's TSS.
    outgoing_tss.gs = regs.gs;
    outgoing_tss.fs = regs.fs;
    outgoing_tss.es = regs.es;
    outgoing_tss.ds = regs.ds;
    outgoing_tss.edi = regs.edi;
    outgoing_tss.esi = regs.esi;
    outgoing_tss.ebp = regs.ebp;
    outgoing_tss.ebx = regs.ebx;
    outgoing_tss.edx = regs.edx;
    outgoing_tss.ecx = regs.ecx;
    outgoing_tss.eax = regs.eax;
    outgoing_tss.eip = regs.eip;
    outgoing_tss.cs = regs.cs;
    outgoing_tss.eflags = regs.eflags;

    // Compute process stack pointer.
    // Add 16 for CS, EIP, EFLAGS, exception code (interrupt mechanic)
    outgoing_tss.esp = regs.esp + 16;
    outgoing_tss.ss = regs.ss;

    // If the interrupted code ran in userspace (CPL != 0), use the
    // userspace stack the CPU pushed instead.
    if ((outgoing_tss.cs & 3) != 0) {
        outgoing_tss.ss = regs.userspace_ss;
        outgoing_tss.esp = regs.userspace_esp;
    }
    prepare_for_iret_to_new_process();

    // Set the NT (nested task) flag.
    asm(
        "pushf\n"
        "orl $0x00004000, (%esp)\n"
        "popf\n");
}
655
// Set when the colonel should leave its hlt loop and yield (see idle_loop()).
static bool s_should_stop_idling = false;

// Request that the idle loop yield. Only meaningful when the colonel is the
// current thread; for any other thread this is a no-op.
void Scheduler::stop_idling()
{
    if (Thread::current != g_colonel)
        return;

    s_should_stop_idling = true;
}
665
666void Scheduler::idle_loop()
667{
668 for (;;) {
669 asm("hlt");
670 if (s_should_stop_idling) {
671 s_should_stop_idling = false;
672 yield();
673 }
674 }
675}
676
677}