Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.7 284 lines 7.4 kB view raw
/* RxRPC point-to-point transport session management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time after last use at which transport record is cleaned up.
 * Expressed in seconds (compared against ktime_get_seconds() stamps in the
 * reaper); defaults to 24 hours.  Set to 0 by
 * rxrpc_destroy_all_transports() to force immediate reaping at exit.
 */
unsigned int rxrpc_transport_expiry = 3600 * 24;

static void rxrpc_transport_reaper(struct work_struct *work);

/* Global list of all transports, protected by rxrpc_transport_lock.  Dead
 * transports (usage == 0) stay on the list until the reaper removes them. */
static LIST_HEAD(rxrpc_transports);
static DEFINE_RWLOCK(rxrpc_transport_lock);
static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);

/*
 * Allocate and initialise a new transport session manager for the given
 * local/peer endpoint pair.
 *
 * The new record starts with a usage count of 1 (owned by the caller).  Note
 * that local and peer are only assigned here, not referenced; the caller
 * (rxrpc_get_transport) takes the refs when the record is actually published.
 *
 * Only AF_INET/SOCK_DGRAM peers are supported; anything else BUG()s.
 * Returns the new record or NULL on allocation failure.
 */
static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
						     struct rxrpc_peer *peer,
						     gfp_t gfp)
{
	struct rxrpc_transport *trans;

	_enter("");

	trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
	if (trans) {
		trans->local = local;
		trans->peer = peer;
		INIT_LIST_HEAD(&trans->link);
		trans->bundles = RB_ROOT;
		trans->client_conns = RB_ROOT;
		trans->server_conns = RB_ROOT;
		skb_queue_head_init(&trans->error_queue);
		spin_lock_init(&trans->client_lock);
		rwlock_init(&trans->conn_lock);
		atomic_set(&trans->usage, 1);
		/* Seed the connection ID counter with the service ID in the
		 * top 16 bits. */
		trans->conn_idcounter = peer->srx.srx_service << 16;
		trans->debug_id = atomic_inc_return(&rxrpc_debug_id);

		if (peer->srx.transport.family == AF_INET) {
			switch (peer->srx.transport_type) {
			case SOCK_DGRAM:
				INIT_WORK(&trans->error_handler,
					  rxrpc_UDP_error_handler);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}
	}

	_leave(" = %p", trans);
	return trans;
}

/*
 * Obtain a transport session for the nominated endpoints, creating a new one
 * if no matching record exists yet.
 *
 * Uses the optimistic double-checked pattern: search under the read lock
 * first; on a miss, allocate a candidate outside any lock, then re-search
 * under the write lock before publishing it (another CPU may have inserted a
 * matching record in the window).  The losing candidate is kfree'd.
 *
 * The caller receives a reference on the returned transport (drop with
 * rxrpc_put_transport()).  Returns ERR_PTR(-ENOMEM) if allocation fails.
 */
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
					    struct rxrpc_peer *peer,
					    gfp_t gfp)
{
	struct rxrpc_transport *trans, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list first */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_transport(local, peer, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	trans = candidate;
	candidate = NULL;
	usage = atomic_read(&trans->usage);

	/* Pin the endpoints for the lifetime of the published record; these
	 * refs are released by rxrpc_cleanup_transport(). */
	rxrpc_get_local(trans->local);
	atomic_inc(&trans->peer->usage);
	list_add_tail(&trans->link, &rxrpc_transports);
	write_unlock_bh(&rxrpc_transport_lock);
	new = "new";

success:
	_net("TRANSPORT %s %d local %d -> peer %d",
	     new,
	     trans->debug_id,
	     trans->local->debug_id,
	     trans->peer->debug_id);

	_leave(" = %p {u=%d}", trans, usage);
	return trans;

	/* we found the transport in the list immediately */
found_extant_transport:
	usage = atomic_inc_return(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	goto success;

	/* we found the transport on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&trans->usage);
	write_unlock_bh(&rxrpc_transport_lock);
	kfree(candidate);
	goto success;
}

/*
 * Find the transport connecting two endpoints, without creating one.
 *
 * On success the caller receives a reference on the returned transport
 * (taken under the read lock, so it cannot race with list removal); returns
 * NULL if no matching record exists.
 */
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
					     struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list */
	read_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}

	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = NULL");
	return NULL;

found_extant_transport:
	atomic_inc(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = %p", trans);
	return trans;
}

/*
 * Release a reference on a transport session.
 *
 * The record is not freed here even when the count hits zero: put_time is
 * stamped (unconditionally, before the decrement, so the reaper always sees
 * a fresh last-use time) and the reaper is kicked to decide when the dead
 * record has expired.  NOTE(review): put_time is written even when other
 * refs remain — presumably benign since only usage==0 records are reaped;
 * confirm against the reaper's expiry logic.
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

	trans->put_time = ktime_get_seconds();
	if (unlikely(atomic_dec_and_test(&trans->usage))) {
		_debug("zombie");
		/* let the reaper determine the timeout to avoid a race with
		 * overextending the timeout if the reaper is running at the
		 * same time */
		rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	}
	_leave("");
}

/*
 * Clean up a dead transport session: purge any queued error skbs, drop the
 * endpoint refs taken when the record was published, and free it.  Called
 * only by the reaper, after the record has been unlinked from the list.
 */
static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
{
	_net("DESTROY TRANS %d", trans->debug_id);

	rxrpc_purge_queue(&trans->error_queue);

	rxrpc_put_local(trans->local);
	rxrpc_put_peer(trans->peer);
	kfree(trans);
}

/*
 * Reap dead transports that have passed their expiry date.
 *
 * Walks the list under the write lock moving every record with usage == 0
 * and put_time + rxrpc_transport_expiry <= now onto a local graveyard list,
 * reschedules itself for the earliest not-yet-expired dead record, then
 * destroys the graveyard entries outside the lock.
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

		if (likely(atomic_read(&trans->usage) > 0))
			continue;

		reap_time = trans->put_time + rxrpc_transport_expiry;
		if (reap_time <= now)
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}

/*
 * Preemptively destroy all the transport session records rather than waiting
 * for them to time out.
 *
 * Zeroing rxrpc_transport_expiry makes every dead record immediately
 * expired; the pending reap is cancelled and requeued with no delay so the
 * reaper runs once more and frees everything at module exit.
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

	rxrpc_transport_expiry = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}