// SerenityOS — Kernel/Net/TCPSocket.cpp (extracted from repository viewer; viewer chrome removed)
1/* 2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org> 3 * 4 * SPDX-License-Identifier: BSD-2-Clause 5 */ 6 7#include <AK/Singleton.h> 8#include <AK/Time.h> 9#include <Kernel/Debug.h> 10#include <Kernel/Devices/RandomDevice.h> 11#include <Kernel/FileSystem/OpenFileDescription.h> 12#include <Kernel/Locking/MutexProtected.h> 13#include <Kernel/Net/EthernetFrameHeader.h> 14#include <Kernel/Net/IPv4.h> 15#include <Kernel/Net/NetworkAdapter.h> 16#include <Kernel/Net/NetworkingManagement.h> 17#include <Kernel/Net/Routing.h> 18#include <Kernel/Net/TCP.h> 19#include <Kernel/Net/TCPSocket.h> 20#include <Kernel/Process.h> 21#include <Kernel/Random.h> 22 23namespace Kernel { 24 25void TCPSocket::for_each(Function<void(TCPSocket const&)> callback) 26{ 27 sockets_by_tuple().for_each_shared([&](auto const& it) { 28 callback(*it.value); 29 }); 30} 31 32ErrorOr<void> TCPSocket::try_for_each(Function<ErrorOr<void>(TCPSocket const&)> callback) 33{ 34 return sockets_by_tuple().with_shared([&](auto const& sockets) -> ErrorOr<void> { 35 for (auto& it : sockets) 36 TRY(callback(*it.value)); 37 return {}; 38 }); 39} 40 41bool TCPSocket::unref() const 42{ 43 bool did_hit_zero = sockets_by_tuple().with_exclusive([&](auto& table) { 44 if (deref_base()) 45 return false; 46 table.remove(tuple()); 47 const_cast<TCPSocket&>(*this).revoke_weak_ptrs(); 48 return true; 49 }); 50 if (did_hit_zero) { 51 const_cast<TCPSocket&>(*this).will_be_destroyed(); 52 delete this; 53 } 54 return did_hit_zero; 55} 56 57void TCPSocket::set_state(State new_state) 58{ 59 dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket({}) state moving from {} to {}", this, to_string(m_state), to_string(new_state)); 60 61 auto was_disconnected = protocol_is_disconnected(); 62 auto previous_role = m_role; 63 64 m_state = new_state; 65 66 if (new_state == State::Established && m_direction == Direction::Outgoing) { 67 set_role(Role::Connected); 68 clear_so_error(); 69 } 70 71 if (new_state == State::TimeWait) { 72 // Once we hit 
TimeWait, we are only holding the socket in case there 73 // are packets on the way which we wouldn't want a new socket to get hit 74 // with, so there's no point in keeping the receive buffer around. 75 drop_receive_buffer(); 76 } 77 78 if (new_state == State::Closed) { 79 closing_sockets().with_exclusive([&](auto& table) { 80 table.remove(tuple()); 81 }); 82 83 if (m_originator) 84 release_to_originator(); 85 } 86 87 if (previous_role != m_role || was_disconnected != protocol_is_disconnected()) 88 evaluate_block_conditions(); 89} 90 91static Singleton<MutexProtected<HashMap<IPv4SocketTuple, RefPtr<TCPSocket>>>> s_socket_closing; 92 93MutexProtected<HashMap<IPv4SocketTuple, RefPtr<TCPSocket>>>& TCPSocket::closing_sockets() 94{ 95 return *s_socket_closing; 96} 97 98static Singleton<MutexProtected<HashMap<IPv4SocketTuple, TCPSocket*>>> s_socket_tuples; 99 100MutexProtected<HashMap<IPv4SocketTuple, TCPSocket*>>& TCPSocket::sockets_by_tuple() 101{ 102 return *s_socket_tuples; 103} 104 105RefPtr<TCPSocket> TCPSocket::from_tuple(IPv4SocketTuple const& tuple) 106{ 107 return sockets_by_tuple().with_shared([&](auto const& table) -> RefPtr<TCPSocket> { 108 auto exact_match = table.get(tuple); 109 if (exact_match.has_value()) 110 return { *exact_match.value() }; 111 112 auto address_tuple = IPv4SocketTuple(tuple.local_address(), tuple.local_port(), IPv4Address(), 0); 113 auto address_match = table.get(address_tuple); 114 if (address_match.has_value()) 115 return { *address_match.value() }; 116 117 auto wildcard_tuple = IPv4SocketTuple(IPv4Address(), tuple.local_port(), IPv4Address(), 0); 118 auto wildcard_match = table.get(wildcard_tuple); 119 if (wildcard_match.has_value()) 120 return { *wildcard_match.value() }; 121 122 return {}; 123 }); 124} 125ErrorOr<NonnullRefPtr<TCPSocket>> TCPSocket::try_create_client(IPv4Address const& new_local_address, u16 new_local_port, IPv4Address const& new_peer_address, u16 new_peer_port) 126{ 127 auto tuple = 
IPv4SocketTuple(new_local_address, new_local_port, new_peer_address, new_peer_port); 128 return sockets_by_tuple().with_exclusive([&](auto& table) -> ErrorOr<NonnullRefPtr<TCPSocket>> { 129 if (table.contains(tuple)) 130 return EEXIST; 131 132 auto receive_buffer = TRY(try_create_receive_buffer()); 133 auto client = TRY(TCPSocket::try_create(protocol(), move(receive_buffer))); 134 135 client->set_setup_state(SetupState::InProgress); 136 client->set_local_address(new_local_address); 137 client->set_local_port(new_local_port); 138 client->set_peer_address(new_peer_address); 139 client->set_peer_port(new_peer_port); 140 client->set_direction(Direction::Incoming); 141 client->set_originator(*this); 142 143 m_pending_release_for_accept.set(tuple, client); 144 table.set(tuple, client); 145 146 return { move(client) }; 147 }); 148} 149 150void TCPSocket::release_to_originator() 151{ 152 VERIFY(!!m_originator); 153 m_originator.strong_ref()->release_for_accept(*this); 154 m_originator.clear(); 155} 156 157void TCPSocket::release_for_accept(NonnullRefPtr<TCPSocket> socket) 158{ 159 VERIFY(m_pending_release_for_accept.contains(socket->tuple())); 160 m_pending_release_for_accept.remove(socket->tuple()); 161 // FIXME: Should we observe this error somehow? 162 [[maybe_unused]] auto rc = queue_connection_from(move(socket)); 163} 164 165TCPSocket::TCPSocket(int protocol, NonnullOwnPtr<DoubleBuffer> receive_buffer, NonnullOwnPtr<KBuffer> scratch_buffer) 166 : IPv4Socket(SOCK_STREAM, protocol, move(receive_buffer), move(scratch_buffer)) 167{ 168 m_last_retransmit_time = kgettimeofday(); 169} 170 171TCPSocket::~TCPSocket() 172{ 173 dequeue_for_retransmit(); 174 175 dbgln_if(TCP_SOCKET_DEBUG, "~TCPSocket in state {}", to_string(state())); 176} 177 178ErrorOr<NonnullRefPtr<TCPSocket>> TCPSocket::try_create(int protocol, NonnullOwnPtr<DoubleBuffer> receive_buffer) 179{ 180 // Note: Scratch buffer is only used for SOCK_STREAM sockets. 
181 auto scratch_buffer = TRY(KBuffer::try_create_with_size("TCPSocket: Scratch buffer"sv, 65536)); 182 return adopt_nonnull_ref_or_enomem(new (nothrow) TCPSocket(protocol, move(receive_buffer), move(scratch_buffer))); 183} 184 185ErrorOr<size_t> TCPSocket::protocol_size(ReadonlyBytes raw_ipv4_packet) 186{ 187 auto& ipv4_packet = *reinterpret_cast<IPv4Packet const*>(raw_ipv4_packet.data()); 188 auto& tcp_packet = *static_cast<TCPPacket const*>(ipv4_packet.payload()); 189 return raw_ipv4_packet.size() - sizeof(IPv4Packet) - tcp_packet.header_size(); 190} 191 192ErrorOr<size_t> TCPSocket::protocol_receive(ReadonlyBytes raw_ipv4_packet, UserOrKernelBuffer& buffer, size_t buffer_size, [[maybe_unused]] int flags) 193{ 194 auto& ipv4_packet = *reinterpret_cast<IPv4Packet const*>(raw_ipv4_packet.data()); 195 auto& tcp_packet = *static_cast<TCPPacket const*>(ipv4_packet.payload()); 196 size_t payload_size = raw_ipv4_packet.size() - sizeof(IPv4Packet) - tcp_packet.header_size(); 197 dbgln_if(TCP_SOCKET_DEBUG, "payload_size {}, will it fit in {}?", payload_size, buffer_size); 198 VERIFY(buffer_size >= payload_size); 199 SOCKET_TRY(buffer.write(tcp_packet.payload(), payload_size)); 200 return payload_size; 201} 202 203ErrorOr<size_t> TCPSocket::protocol_send(UserOrKernelBuffer const& data, size_t data_length) 204{ 205 RoutingDecision routing_decision = route_to(peer_address(), local_address(), bound_interface()); 206 if (routing_decision.is_zero()) 207 return set_so_error(EHOSTUNREACH); 208 size_t mss = routing_decision.adapter->mtu() - sizeof(IPv4Packet) - sizeof(TCPPacket); 209 data_length = min(data_length, mss); 210 TRY(send_tcp_packet(TCPFlags::PSH | TCPFlags::ACK, &data, data_length, &routing_decision)); 211 return data_length; 212} 213 214ErrorOr<void> TCPSocket::send_ack(bool allow_duplicate) 215{ 216 if (!allow_duplicate && m_last_ack_number_sent == m_ack_number) 217 return {}; 218 return send_tcp_packet(TCPFlags::ACK); 219} 220 221ErrorOr<void> 
TCPSocket::send_tcp_packet(u16 flags, UserOrKernelBuffer const* payload, size_t payload_size, RoutingDecision* user_routing_decision) 222{ 223 RoutingDecision routing_decision = user_routing_decision ? *user_routing_decision : route_to(peer_address(), local_address(), bound_interface()); 224 if (routing_decision.is_zero()) 225 return set_so_error(EHOSTUNREACH); 226 227 auto ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset(); 228 229 bool const has_mss_option = flags == TCPFlags::SYN; 230 const size_t options_size = has_mss_option ? sizeof(TCPOptionMSS) : 0; 231 const size_t tcp_header_size = sizeof(TCPPacket) + options_size; 232 const size_t buffer_size = ipv4_payload_offset + tcp_header_size + payload_size; 233 auto packet = routing_decision.adapter->acquire_packet_buffer(buffer_size); 234 if (!packet) 235 return set_so_error(ENOMEM); 236 routing_decision.adapter->fill_in_ipv4_header(*packet, local_address(), 237 routing_decision.next_hop, peer_address(), IPv4Protocol::TCP, 238 buffer_size - ipv4_payload_offset, type_of_service(), ttl()); 239 memset(packet->buffer->data() + ipv4_payload_offset, 0, sizeof(TCPPacket)); 240 auto& tcp_packet = *(TCPPacket*)(packet->buffer->data() + ipv4_payload_offset); 241 VERIFY(local_port()); 242 tcp_packet.set_source_port(local_port()); 243 tcp_packet.set_destination_port(peer_port()); 244 tcp_packet.set_window_size(NumericLimits<u16>::max()); 245 tcp_packet.set_sequence_number(m_sequence_number); 246 tcp_packet.set_data_offset(tcp_header_size / sizeof(u32)); 247 tcp_packet.set_flags(flags); 248 249 if (payload) { 250 if (auto result = payload->read(tcp_packet.payload(), payload_size); result.is_error()) { 251 routing_decision.adapter->release_packet_buffer(*packet); 252 return set_so_error(result.release_error()); 253 } 254 } 255 256 if (flags & TCPFlags::ACK) { 257 m_last_ack_number_sent = m_ack_number; 258 m_last_ack_sent_time = kgettimeofday(); 259 tcp_packet.set_ack_number(m_ack_number); 260 } 261 262 if 
(flags & TCPFlags::SYN) { 263 ++m_sequence_number; 264 } else { 265 m_sequence_number += payload_size; 266 } 267 268 if (has_mss_option) { 269 u16 mss = routing_decision.adapter->mtu() - sizeof(IPv4Packet) - sizeof(TCPPacket); 270 TCPOptionMSS mss_option { mss }; 271 VERIFY(packet->buffer->size() >= ipv4_payload_offset + sizeof(TCPPacket) + sizeof(mss_option)); 272 memcpy(packet->buffer->data() + ipv4_payload_offset + sizeof(TCPPacket), &mss_option, sizeof(mss_option)); 273 } 274 275 tcp_packet.set_checksum(compute_tcp_checksum(local_address(), peer_address(), tcp_packet, payload_size)); 276 277 bool expect_ack { tcp_packet.has_syn() || payload_size > 0 }; 278 if (expect_ack) { 279 bool append_failed { false }; 280 m_unacked_packets.with_exclusive([&](auto& unacked_packets) { 281 auto result = unacked_packets.packets.try_append({ m_sequence_number, packet, ipv4_payload_offset, *routing_decision.adapter }); 282 if (result.is_error()) { 283 dbgln("TCPSocket: Dropped outbound packet because try_append() failed"); 284 append_failed = true; 285 return; 286 } 287 unacked_packets.size += payload_size; 288 enqueue_for_retransmit(); 289 }); 290 if (append_failed) 291 return set_so_error(ENOMEM); 292 } 293 294 m_packets_out++; 295 m_bytes_out += buffer_size; 296 routing_decision.adapter->send_packet(packet->bytes()); 297 if (!expect_ack) 298 routing_decision.adapter->release_packet_buffer(*packet); 299 300 return {}; 301} 302 303void TCPSocket::receive_tcp_packet(TCPPacket const& packet, u16 size) 304{ 305 if (packet.has_ack()) { 306 u32 ack_number = packet.ack_number(); 307 308 dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet: {}", ack_number); 309 310 int removed = 0; 311 m_unacked_packets.with_exclusive([&](auto& unacked_packets) { 312 while (!unacked_packets.packets.is_empty()) { 313 auto& packet = unacked_packets.packets.first(); 314 315 dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: iterate: {}", packet.ack_number); 316 317 if (packet.ack_number <= ack_number) { 318 
auto old_adapter = packet.adapter.strong_ref(); 319 if (old_adapter) 320 old_adapter->release_packet_buffer(*packet.buffer); 321 TCPPacket& tcp_packet = *(TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset); 322 auto payload_size = packet.buffer->buffer->data() + packet.buffer->buffer->size() - (u8*)tcp_packet.payload(); 323 unacked_packets.size -= payload_size; 324 evaluate_block_conditions(); 325 unacked_packets.packets.take_first(); 326 removed++; 327 } else { 328 break; 329 } 330 } 331 332 if (unacked_packets.packets.is_empty()) { 333 m_retransmit_attempts = 0; 334 dequeue_for_retransmit(); 335 } 336 337 dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet acknowledged {} packets", removed); 338 }); 339 } 340 341 m_packets_in++; 342 m_bytes_in += packet.header_size() + size; 343} 344 345bool TCPSocket::should_delay_next_ack() const 346{ 347 // FIXME: We don't know the MSS here so make a reasonable guess. 348 const size_t mss = 1500; 349 350 // RFC 1122 says we should send an ACK for every two full-sized segments. 351 if (m_ack_number >= m_last_ack_number_sent + 2 * mss) 352 return false; 353 354 // RFC 1122 says we should not delay ACKs for more than 500 milliseconds. 
355 if (kgettimeofday() >= m_last_ack_sent_time + Time::from_milliseconds(500)) 356 return false; 357 358 return true; 359} 360 361NetworkOrdered<u16> TCPSocket::compute_tcp_checksum(IPv4Address const& source, IPv4Address const& destination, TCPPacket const& packet, u16 payload_size) 362{ 363 union PseudoHeader { 364 struct [[gnu::packed]] { 365 IPv4Address source; 366 IPv4Address destination; 367 u8 zero; 368 u8 protocol; 369 NetworkOrdered<u16> payload_size; 370 } header; 371 u16 raw[6]; 372 }; 373 static_assert(sizeof(PseudoHeader) == 12); 374 375 Checked<u16> packet_size = packet.header_size(); 376 packet_size += payload_size; 377 VERIFY(!packet_size.has_overflow()); 378 379 PseudoHeader pseudo_header { .header = { source, destination, 0, (u8)IPv4Protocol::TCP, packet_size.value() } }; 380 381 u32 checksum = 0; 382 auto* raw_pseudo_header = pseudo_header.raw; 383 for (size_t i = 0; i < sizeof(pseudo_header) / sizeof(u16); ++i) { 384 checksum += AK::convert_between_host_and_network_endian(raw_pseudo_header[i]); 385 if (checksum > 0xffff) 386 checksum = (checksum >> 16) + (checksum & 0xffff); 387 } 388 auto* raw_packet = bit_cast<u16*>(&packet); 389 for (size_t i = 0; i < packet.header_size() / sizeof(u16); ++i) { 390 checksum += AK::convert_between_host_and_network_endian(raw_packet[i]); 391 if (checksum > 0xffff) 392 checksum = (checksum >> 16) + (checksum & 0xffff); 393 } 394 VERIFY(packet.data_offset() * 4 == packet.header_size()); 395 auto* raw_payload = bit_cast<u16*>(packet.payload()); 396 for (size_t i = 0; i < payload_size / sizeof(u16); ++i) { 397 checksum += AK::convert_between_host_and_network_endian(raw_payload[i]); 398 if (checksum > 0xffff) 399 checksum = (checksum >> 16) + (checksum & 0xffff); 400 } 401 if (payload_size & 1) { 402 u16 expanded_byte = ((u8 const*)packet.payload())[payload_size - 1] << 8; 403 checksum += expanded_byte; 404 if (checksum > 0xffff) 405 checksum = (checksum >> 16) + (checksum & 0xffff); 406 } 407 return ~(checksum & 
0xffff); 408} 409 410ErrorOr<void> TCPSocket::protocol_bind() 411{ 412 if (has_specific_local_address() && !m_adapter) { 413 m_adapter = NetworkingManagement::the().from_ipv4_address(local_address()); 414 if (!m_adapter) 415 return set_so_error(EADDRNOTAVAIL); 416 } 417 418 return {}; 419} 420 421ErrorOr<void> TCPSocket::protocol_listen(bool did_allocate_port) 422{ 423 if (!did_allocate_port) { 424 bool ok = sockets_by_tuple().with_exclusive([&](auto& table) -> bool { 425 if (table.contains(tuple())) 426 return false; 427 table.set(tuple(), this); 428 return true; 429 }); 430 if (!ok) 431 return set_so_error(EADDRINUSE); 432 } 433 434 set_direction(Direction::Passive); 435 set_state(State::Listen); 436 set_setup_state(SetupState::Completed); 437 return {}; 438} 439 440ErrorOr<void> TCPSocket::protocol_connect(OpenFileDescription& description) 441{ 442 MutexLocker locker(mutex()); 443 444 auto routing_decision = route_to(peer_address(), local_address()); 445 if (routing_decision.is_zero()) 446 return set_so_error(EHOSTUNREACH); 447 if (!has_specific_local_address()) 448 set_local_address(routing_decision.adapter->ipv4_address()); 449 450 if (auto result = allocate_local_port_if_needed(); result.error_or_port.is_error()) 451 return result.error_or_port.release_error(); 452 453 m_sequence_number = get_good_random<u32>(); 454 m_ack_number = 0; 455 456 set_setup_state(SetupState::InProgress); 457 TRY(send_tcp_packet(TCPFlags::SYN)); 458 m_state = State::SynSent; 459 set_role(Role::Connecting); 460 m_direction = Direction::Outgoing; 461 462 evaluate_block_conditions(); 463 464 if (description.is_blocking()) { 465 locker.unlock(); 466 auto unblock_flags = Thread::FileBlocker::BlockFlags::None; 467 if (Thread::current()->block<Thread::ConnectBlocker>({}, description, unblock_flags).was_interrupted()) 468 return set_so_error(EINTR); 469 locker.lock(); 470 VERIFY(setup_state() == SetupState::Completed); 471 if (has_error()) { // TODO: check unblock_flags 472 
set_role(Role::None); 473 if (error() == TCPSocket::Error::RetransmitTimeout) 474 return set_so_error(ETIMEDOUT); 475 else 476 return set_so_error(ECONNREFUSED); 477 } 478 return {}; 479 } 480 481 return set_so_error(EINPROGRESS); 482} 483 484ErrorOr<u16> TCPSocket::protocol_allocate_local_port() 485{ 486 constexpr u16 first_ephemeral_port = 32768; 487 constexpr u16 last_ephemeral_port = 60999; 488 constexpr u16 ephemeral_port_range_size = last_ephemeral_port - first_ephemeral_port; 489 u16 first_scan_port = first_ephemeral_port + get_good_random<u16>() % ephemeral_port_range_size; 490 491 return sockets_by_tuple().with_exclusive([&](auto& table) -> ErrorOr<u16> { 492 for (u16 port = first_scan_port;;) { 493 IPv4SocketTuple proposed_tuple(local_address(), port, peer_address(), peer_port()); 494 495 auto it = table.find(proposed_tuple); 496 if (it == table.end()) { 497 set_local_port(port); 498 table.set(proposed_tuple, this); 499 return port; 500 } 501 ++port; 502 if (port > last_ephemeral_port) 503 port = first_ephemeral_port; 504 if (port == first_scan_port) 505 break; 506 } 507 return set_so_error(EADDRINUSE); 508 }); 509} 510 511bool TCPSocket::protocol_is_disconnected() const 512{ 513 switch (m_state) { 514 case State::Closed: 515 case State::CloseWait: 516 case State::LastAck: 517 case State::FinWait1: 518 case State::FinWait2: 519 case State::Closing: 520 case State::TimeWait: 521 return true; 522 default: 523 return false; 524 } 525} 526 527void TCPSocket::shut_down_for_writing() 528{ 529 if (state() == State::Established) { 530 dbgln_if(TCP_SOCKET_DEBUG, " Sending FIN from Established and moving into FinWait1"); 531 (void)send_tcp_packet(TCPFlags::FIN); 532 set_state(State::FinWait1); 533 } else { 534 dbgln(" Shutting down TCPSocket for writing but not moving to FinWait1 since state is {}", to_string(state())); 535 } 536} 537 538ErrorOr<void> TCPSocket::close() 539{ 540 MutexLocker locker(mutex()); 541 auto result = IPv4Socket::close(); 542 if (state() == 
State::CloseWait) { 543 dbgln_if(TCP_SOCKET_DEBUG, " Sending FIN from CloseWait and moving into LastAck"); 544 [[maybe_unused]] auto rc = send_tcp_packet(TCPFlags::FIN | TCPFlags::ACK); 545 set_state(State::LastAck); 546 } 547 548 if (state() != State::Closed && state() != State::Listen) 549 closing_sockets().with_exclusive([&](auto& table) { 550 table.set(tuple(), *this); 551 }); 552 return result; 553} 554 555static Singleton<MutexProtected<TCPSocket::RetransmitList>> s_sockets_for_retransmit; 556 557MutexProtected<TCPSocket::RetransmitList>& TCPSocket::sockets_for_retransmit() 558{ 559 return *s_sockets_for_retransmit; 560} 561 562void TCPSocket::enqueue_for_retransmit() 563{ 564 sockets_for_retransmit().with_exclusive([&](auto& list) { 565 list.append(*this); 566 }); 567} 568 569void TCPSocket::dequeue_for_retransmit() 570{ 571 sockets_for_retransmit().with_exclusive([&](auto& list) { 572 list.remove(*this); 573 }); 574} 575 576void TCPSocket::retransmit_packets() 577{ 578 auto now = kgettimeofday(); 579 580 // RFC6298 says we should have at least one second between retransmits. According to 581 // RFC1122 we must do exponential backoff - even for SYN packets. 
582 i64 retransmit_interval = 1; 583 for (decltype(m_retransmit_attempts) i = 0; i < m_retransmit_attempts; i++) 584 retransmit_interval *= 2; 585 586 if (m_last_retransmit_time > now - Time::from_seconds(retransmit_interval)) 587 return; 588 589 dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket({}) handling retransmit", this); 590 591 m_last_retransmit_time = now; 592 ++m_retransmit_attempts; 593 594 if (m_retransmit_attempts > maximum_retransmits) { 595 set_state(TCPSocket::State::Closed); 596 set_error(TCPSocket::Error::RetransmitTimeout); 597 set_setup_state(Socket::SetupState::Completed); 598 return; 599 } 600 601 auto routing_decision = route_to(peer_address(), local_address(), bound_interface()); 602 if (routing_decision.is_zero()) 603 return; 604 605 m_unacked_packets.with_exclusive([&](auto& unacked_packets) { 606 for (auto& packet : unacked_packets.packets) { 607 packet.tx_counter++; 608 609 if constexpr (TCP_SOCKET_DEBUG) { 610 auto& tcp_packet = *(const TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset); 611 dbgln("Sending TCP packet from {}:{} to {}:{} with ({}{}{}{}) seq_no={}, ack_no={}, tx_counter={}", 612 local_address(), local_port(), 613 peer_address(), peer_port(), 614 (tcp_packet.has_syn() ? "SYN " : ""), 615 (tcp_packet.has_ack() ? "ACK " : ""), 616 (tcp_packet.has_fin() ? "FIN " : ""), 617 (tcp_packet.has_rst() ? "RST " : ""), 618 tcp_packet.sequence_number(), 619 tcp_packet.ack_number(), 620 packet.tx_counter); 621 } 622 623 size_t ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset(); 624 if (ipv4_payload_offset != packet.ipv4_payload_offset) { 625 // FIXME: Add support for this. This can happen if after a route change 626 // we ended up on another adapter which doesn't have the same layer 2 type 627 // like the previous adapter. 
628 VERIFY_NOT_REACHED(); 629 } 630 631 auto packet_buffer = packet.buffer->bytes(); 632 633 routing_decision.adapter->fill_in_ipv4_header(*packet.buffer, 634 local_address(), routing_decision.next_hop, peer_address(), 635 IPv4Protocol::TCP, packet_buffer.size() - ipv4_payload_offset, type_of_service(), ttl()); 636 routing_decision.adapter->send_packet(packet_buffer); 637 m_packets_out++; 638 m_bytes_out += packet_buffer.size(); 639 } 640 }); 641} 642 643bool TCPSocket::can_write(OpenFileDescription const& file_description, u64 size) const 644{ 645 if (!IPv4Socket::can_write(file_description, size)) 646 return false; 647 648 if (m_state == State::SynSent || m_state == State::SynReceived) 649 return false; 650 651 if (!file_description.is_blocking()) 652 return true; 653 654 return m_unacked_packets.with_shared([&](auto& unacked_packets) { 655 return unacked_packets.size + size <= m_send_window_size; 656 }); 657} 658}