[PATCH] Uninline a bunch of other functions

Remove the "inline" keyword from a bunch of big functions in the kernel with
the goal of shrinking it by 30kb to 40kb

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Arjan van de Ven and committed by Linus Torvalds 858119e1 b0a9499c

+226 -227
+1 -1
drivers/acpi/ec.c
··· 153 Transaction Management 154 -------------------------------------------------------------------------- */ 155 156 - static inline u32 acpi_ec_read_status(union acpi_ec *ec) 157 { 158 u32 status = 0; 159
··· 153 Transaction Management 154 -------------------------------------------------------------------------- */ 155 156 + static u32 acpi_ec_read_status(union acpi_ec *ec) 157 { 158 u32 status = 0; 159
+1 -1
drivers/base/firmware_class.c
··· 48 struct timer_list timeout; 49 }; 50 51 - static inline void 52 fw_load_abort(struct firmware_priv *fw_priv) 53 { 54 set_bit(FW_STATUS_ABORT, &fw_priv->status);
··· 48 struct timer_list timeout; 49 }; 50 51 + static void 52 fw_load_abort(struct firmware_priv *fw_priv) 53 { 54 set_bit(FW_STATUS_ABORT, &fw_priv->status);
+1 -1
drivers/block/loop.c
··· 294 * This helper just factors out common code between do_lo_send_direct_write() 295 * and do_lo_send_write(). 296 */ 297 - static inline int __do_lo_send_write(struct file *file, 298 u8 __user *buf, const int len, loff_t pos) 299 { 300 ssize_t bw;
··· 294 * This helper just factors out common code between do_lo_send_direct_write() 295 * and do_lo_send_write(). 296 */ 297 + static int __do_lo_send_write(struct file *file, 298 u8 __user *buf, const int len, loff_t pos) 299 { 300 ssize_t bw;
+1 -1
drivers/bluetooth/hci_bcsp.c
··· 494 } 495 } 496 497 - static inline void bcsp_complete_rx_pkt(struct hci_uart *hu) 498 { 499 struct bcsp_struct *bcsp = hu->priv; 500 int pass_up;
··· 494 } 495 } 496 497 + static void bcsp_complete_rx_pkt(struct hci_uart *hu) 498 { 499 struct bcsp_struct *bcsp = hu->priv; 500 int pass_up;
+1 -1
drivers/char/drm/r128_state.c
··· 220 ADVANCE_RING(); 221 } 222 223 - static __inline__ void r128_emit_state(drm_r128_private_t * dev_priv) 224 { 225 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 226 unsigned int dirty = sarea_priv->dirty;
··· 220 ADVANCE_RING(); 221 } 222 223 + static void r128_emit_state(drm_r128_private_t * dev_priv) 224 { 225 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 226 unsigned int dirty = sarea_priv->dirty;
+3 -4
drivers/cpufreq/cpufreq.c
··· 41 /* internal prototypes */ 42 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); 43 static void handle_update(void *data); 44 - static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci); 45 46 /** 47 * Two notifier lists: the "policy" list is involved in the ··· 126 static unsigned int disable_ratelimit = 1; 127 static DEFINE_SPINLOCK(disable_ratelimit_lock); 128 129 - static inline void cpufreq_debug_enable_ratelimit(void) 130 { 131 unsigned long flags; 132 ··· 136 spin_unlock_irqrestore(&disable_ratelimit_lock, flags); 137 } 138 139 - static inline void cpufreq_debug_disable_ratelimit(void) 140 { 141 unsigned long flags; 142 ··· 205 static unsigned long l_p_j_ref; 206 static unsigned int l_p_j_ref_freq; 207 208 - static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 209 { 210 if (ci->flags & CPUFREQ_CONST_LOOPS) 211 return;
··· 41 /* internal prototypes */ 42 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); 43 static void handle_update(void *data); 44 45 /** 46 * Two notifier lists: the "policy" list is involved in the ··· 127 static unsigned int disable_ratelimit = 1; 128 static DEFINE_SPINLOCK(disable_ratelimit_lock); 129 130 + static void cpufreq_debug_enable_ratelimit(void) 131 { 132 unsigned long flags; 133 ··· 137 spin_unlock_irqrestore(&disable_ratelimit_lock, flags); 138 } 139 140 + static void cpufreq_debug_disable_ratelimit(void) 141 { 142 unsigned long flags; 143 ··· 206 static unsigned long l_p_j_ref; 207 static unsigned int l_p_j_ref_freq; 208 209 + static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 210 { 211 if (ci->flags & CPUFREQ_CONST_LOOPS) 212 return;
+2 -2
drivers/ide/ide-cd.c
··· 980 * and attempt to recover if there are problems. Returns 0 if everything's 981 * ok; nonzero if the request has been terminated. 982 */ 983 - static inline 984 int cdrom_read_check_ireason (ide_drive_t *drive, int len, int ireason) 985 { 986 if (ireason == 2) ··· 1539 /* 1540 * Write handling 1541 */ 1542 - static inline int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason) 1543 { 1544 /* Two notes about IDE interrupt reason here - 0 means that 1545 * the drive wants to receive data from us, 2 means that
··· 980 * and attempt to recover if there are problems. Returns 0 if everything's 981 * ok; nonzero if the request has been terminated. 982 */ 983 + static 984 int cdrom_read_check_ireason (ide_drive_t *drive, int len, int ireason) 985 { 986 if (ireason == 2) ··· 1539 /* 1540 * Write handling 1541 */ 1542 + static int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason) 1543 { 1544 /* Two notes about IDE interrupt reason here - 0 means that 1545 * the drive wants to receive data from us, 2 means that
+1 -1
drivers/ide/ide-disk.c
··· 477 && id->lba_capacity_2; 478 } 479 480 - static inline void idedisk_check_hpa(ide_drive_t *drive) 481 { 482 unsigned long long capacity, set_max; 483 int lba48 = idedisk_supports_lba48(drive->id);
··· 477 && id->lba_capacity_2; 478 } 479 480 + static void idedisk_check_hpa(ide_drive_t *drive) 481 { 482 unsigned long long capacity, set_max; 483 int lba48 = idedisk_supports_lba48(drive->id);
+1 -1
drivers/ide/ide-taskfile.c
··· 308 ide_pio_sector(drive, write); 309 } 310 311 - static inline void ide_pio_datablock(ide_drive_t *drive, struct request *rq, 312 unsigned int write) 313 { 314 if (rq->bio) /* fs request */
··· 308 ide_pio_sector(drive, write); 309 } 310 311 + static void ide_pio_datablock(ide_drive_t *drive, struct request *rq, 312 unsigned int write) 313 { 314 if (rq->bio) /* fs request */
+2 -2
drivers/infiniband/core/cm.c
··· 856 param->private_data_len); 857 } 858 859 - static inline int cm_validate_req_param(struct ib_cm_req_param *param) 860 { 861 /* peer-to-peer not supported */ 862 if (param->peer_to_peer) ··· 1005 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); 1006 } 1007 1008 - static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg, 1009 struct ib_sa_path_rec *primary_path, 1010 struct ib_sa_path_rec *alt_path) 1011 {
··· 856 param->private_data_len); 857 } 858 859 + static int cm_validate_req_param(struct ib_cm_req_param *param) 860 { 861 /* peer-to-peer not supported */ 862 if (param->peer_to_peer) ··· 1005 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); 1006 } 1007 1008 + static void cm_format_paths_from_req(struct cm_req_msg *req_msg, 1009 struct ib_sa_path_rec *primary_path, 1010 struct ib_sa_path_rec *alt_path) 1011 {
+1 -1
drivers/isdn/hisax/avm_pci.c
··· 358 } 359 } 360 361 - static inline void 362 HDLC_irq(struct BCState *bcs, u_int stat) { 363 int len; 364 struct sk_buff *skb;
··· 358 } 359 } 360 361 + static void 362 HDLC_irq(struct BCState *bcs, u_int stat) { 363 int len; 364 struct sk_buff *skb;
+1 -1
drivers/isdn/hisax/diva.c
··· 476 } 477 } 478 479 - static inline void 480 Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx) 481 { 482 u_char r;
··· 476 } 477 } 478 479 + static void 480 Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx) 481 { 482 u_char r;
+2 -2
drivers/isdn/hisax/hscx_irq.c
··· 119 } 120 } 121 122 - static inline void 123 hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx) 124 { 125 u_char r; ··· 221 } 222 } 223 224 - static inline void 225 hscx_int_main(struct IsdnCardState *cs, u_char val) 226 { 227
··· 119 } 120 } 121 122 + static void 123 hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx) 124 { 125 u_char r; ··· 221 } 222 } 223 224 + static void 225 hscx_int_main(struct IsdnCardState *cs, u_char val) 226 { 227
+1 -1
drivers/isdn/hisax/jade_irq.c
··· 110 } 111 112 113 - static inline void 114 jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade) 115 { 116 u_char r;
··· 110 } 111 112 113 + static void 114 jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade) 115 { 116 u_char r;
+1 -1
drivers/md/bitmap.c
··· 200 /* if page is completely empty, put it back on the free list, or dealloc it */ 201 /* if page was hijacked, unmark the flag so it might get alloced next time */ 202 /* Note: lock should be held when calling this */ 203 - static inline void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) 204 { 205 char *ptr; 206
··· 200 /* if page is completely empty, put it back on the free list, or dealloc it */ 201 /* if page was hijacked, unmark the flag so it might get alloced next time */ 202 /* Note: lock should be held when calling this */ 203 + static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) 204 { 205 char *ptr; 206
+1 -1
drivers/md/dm-crypt.c
··· 228 }; 229 230 231 - static inline int 232 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, 233 struct scatterlist *in, unsigned int length, 234 int write, sector_t sector)
··· 228 }; 229 230 231 + static int 232 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, 233 struct scatterlist *in, unsigned int length, 234 int write, sector_t sector)
+2 -2
drivers/md/dm-ioctl.c
··· 598 /* 599 * Always use UUID for lookups if it's present, otherwise use name or dev. 600 */ 601 - static inline struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) 602 { 603 if (*param->uuid) 604 return __get_uuid_cell(param->uuid); ··· 608 return dm_get_mdptr(huge_decode_dev(param->dev)); 609 } 610 611 - static inline struct mapped_device *find_device(struct dm_ioctl *param) 612 { 613 struct hash_cell *hc; 614 struct mapped_device *md = NULL;
··· 598 /* 599 * Always use UUID for lookups if it's present, otherwise use name or dev. 600 */ 601 + static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) 602 { 603 if (*param->uuid) 604 return __get_uuid_cell(param->uuid); ··· 608 return dm_get_mdptr(huge_decode_dev(param->dev)); 609 } 610 611 + static struct mapped_device *find_device(struct dm_ioctl *param) 612 { 613 struct hash_cell *hc; 614 struct mapped_device *md = NULL;
+1 -1
drivers/md/dm-snap.c
··· 691 /* 692 * Dispatches the copy operation to kcopyd. 693 */ 694 - static inline void start_copy(struct pending_exception *pe) 695 { 696 struct dm_snapshot *s = pe->snap; 697 struct io_region src, dest;
··· 691 /* 692 * Dispatches the copy operation to kcopyd. 693 */ 694 + static void start_copy(struct pending_exception *pe) 695 { 696 struct dm_snapshot *s = pe->snap; 697 struct io_region src, dest;
+1 -1
drivers/md/dm.c
··· 293 * Decrements the number of outstanding ios that a bio has been 294 * cloned into, completing the original io if necc. 295 */ 296 - static inline void dec_pending(struct dm_io *io, int error) 297 { 298 if (error) 299 io->error = error;
··· 293 * Decrements the number of outstanding ios that a bio has been 294 * cloned into, completing the original io if necc. 295 */ 296 + static void dec_pending(struct dm_io *io, int error) 297 { 298 if (error) 299 io->error = error;
+2 -2
drivers/md/raid1.c
··· 176 } 177 } 178 179 - static inline void free_r1bio(r1bio_t *r1_bio) 180 { 181 conf_t *conf = mddev_to_conf(r1_bio->mddev); 182 ··· 190 mempool_free(r1_bio, conf->r1bio_pool); 191 } 192 193 - static inline void put_buf(r1bio_t *r1_bio) 194 { 195 conf_t *conf = mddev_to_conf(r1_bio->mddev); 196 int i;
··· 176 } 177 } 178 179 + static void free_r1bio(r1bio_t *r1_bio) 180 { 181 conf_t *conf = mddev_to_conf(r1_bio->mddev); 182 ··· 190 mempool_free(r1_bio, conf->r1bio_pool); 191 } 192 193 + static void put_buf(r1bio_t *r1_bio) 194 { 195 conf_t *conf = mddev_to_conf(r1_bio->mddev); 196 int i;
+2 -2
drivers/md/raid10.c
··· 176 } 177 } 178 179 - static inline void free_r10bio(r10bio_t *r10_bio) 180 { 181 conf_t *conf = mddev_to_conf(r10_bio->mddev); 182 ··· 190 mempool_free(r10_bio, conf->r10bio_pool); 191 } 192 193 - static inline void put_buf(r10bio_t *r10_bio) 194 { 195 conf_t *conf = mddev_to_conf(r10_bio->mddev); 196
··· 176 } 177 } 178 179 + static void free_r10bio(r10bio_t *r10_bio) 180 { 181 conf_t *conf = mddev_to_conf(r10_bio->mddev); 182 ··· 190 mempool_free(r10_bio, conf->r10bio_pool); 191 } 192 193 + static void put_buf(r10bio_t *r10_bio) 194 { 195 conf_t *conf = mddev_to_conf(r10_bio->mddev); 196
+5 -5
drivers/md/raid5.c
··· 69 70 static void print_raid5_conf (raid5_conf_t *conf); 71 72 - static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 73 { 74 if (atomic_dec_and_test(&sh->count)) { 75 if (!list_empty(&sh->lru)) ··· 118 hlist_del_init(&sh->hash); 119 } 120 121 - static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 122 { 123 struct hlist_head *hp = stripe_hash(conf, sh->sector); 124 ··· 178 179 static void raid5_build_block (struct stripe_head *sh, int i); 180 181 - static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 182 { 183 raid5_conf_t *conf = sh->raid_conf; 184 int disks = conf->raid_disks, i; ··· 1415 } 1416 } 1417 1418 - static inline void raid5_activate_delayed(raid5_conf_t *conf) 1419 { 1420 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 1421 while (!list_empty(&conf->delayed_list)) { ··· 1431 } 1432 } 1433 1434 - static inline void activate_bit_delay(raid5_conf_t *conf) 1435 { 1436 /* device_lock is held */ 1437 struct list_head head;
··· 69 70 static void print_raid5_conf (raid5_conf_t *conf); 71 72 + static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 73 { 74 if (atomic_dec_and_test(&sh->count)) { 75 if (!list_empty(&sh->lru)) ··· 118 hlist_del_init(&sh->hash); 119 } 120 121 + static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 122 { 123 struct hlist_head *hp = stripe_hash(conf, sh->sector); 124 ··· 178 179 static void raid5_build_block (struct stripe_head *sh, int i); 180 181 + static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 182 { 183 raid5_conf_t *conf = sh->raid_conf; 184 int disks = conf->raid_disks, i; ··· 1415 } 1416 } 1417 1418 + static void raid5_activate_delayed(raid5_conf_t *conf) 1419 { 1420 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 1421 while (!list_empty(&conf->delayed_list)) { ··· 1431 } 1432 } 1433 1434 + static void activate_bit_delay(raid5_conf_t *conf) 1435 { 1436 /* device_lock is held */ 1437 struct list_head head;
+4 -4
drivers/md/raid6main.c
··· 88 89 static void print_raid6_conf (raid6_conf_t *conf); 90 91 - static inline void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh) 92 { 93 if (atomic_dec_and_test(&sh->count)) { 94 if (!list_empty(&sh->lru)) ··· 197 198 static void raid6_build_block (struct stripe_head *sh, int i); 199 200 - static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 201 { 202 raid6_conf_t *conf = sh->raid_conf; 203 int disks = conf->raid_disks, i; ··· 1577 } 1578 } 1579 1580 - static inline void raid6_activate_delayed(raid6_conf_t *conf) 1581 { 1582 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 1583 while (!list_empty(&conf->delayed_list)) { ··· 1593 } 1594 } 1595 1596 - static inline void activate_bit_delay(raid6_conf_t *conf) 1597 { 1598 /* device_lock is held */ 1599 struct list_head head;
··· 88 89 static void print_raid6_conf (raid6_conf_t *conf); 90 91 + static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh) 92 { 93 if (atomic_dec_and_test(&sh->count)) { 94 if (!list_empty(&sh->lru)) ··· 197 198 static void raid6_build_block (struct stripe_head *sh, int i); 199 200 + static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 201 { 202 raid6_conf_t *conf = sh->raid_conf; 203 int disks = conf->raid_disks, i; ··· 1577 } 1578 } 1579 1580 + static void raid6_activate_delayed(raid6_conf_t *conf) 1581 { 1582 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 1583 while (!list_empty(&conf->delayed_list)) { ··· 1593 } 1594 } 1595 1596 + static void activate_bit_delay(raid6_conf_t *conf) 1597 { 1598 /* device_lock is held */ 1599 struct list_head head;
+1 -1
drivers/media/video/tvp5150.c
··· 93 int sat; 94 }; 95 96 - static inline int tvp5150_read(struct i2c_client *c, unsigned char addr) 97 { 98 unsigned char buffer[1]; 99 int rc;
··· 93 int sat; 94 }; 95 96 + static int tvp5150_read(struct i2c_client *c, unsigned char addr) 97 { 98 unsigned char buffer[1]; 99 int rc;
+2 -2
drivers/message/fusion/mptlan.c
··· 844 } 845 846 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 847 - static inline void 848 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) 849 /* 850 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue ··· 866 } 867 868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 869 - static inline int 870 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb) 871 { 872 struct mpt_lan_priv *priv = dev->priv;
··· 844 } 845 846 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 847 + static void 848 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) 849 /* 850 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue ··· 866 } 867 868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 869 + static int 870 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb) 871 { 872 struct mpt_lan_priv *priv = dev->priv;
+1 -1
drivers/mtd/devices/doc2000.c
··· 138 bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is 139 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ 140 141 - static inline int DoC_Command(struct DiskOnChip *doc, unsigned char command, 142 unsigned char xtraflags) 143 { 144 void __iomem *docptr = doc->virtadr;
··· 138 bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is 139 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ 140 141 + static int DoC_Command(struct DiskOnChip *doc, unsigned char command, 142 unsigned char xtraflags) 143 { 144 void __iomem *docptr = doc->virtadr;
+1 -1
drivers/mtd/devices/doc2001.c
··· 103 with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is 104 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ 105 106 - static inline void DoC_Command(void __iomem * docptr, unsigned char command, 107 unsigned char xtraflags) 108 { 109 /* Assert the CLE (Command Latch Enable) line to the flash chip */
··· 103 with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is 104 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ 105 106 + static void DoC_Command(void __iomem * docptr, unsigned char command, 107 unsigned char xtraflags) 108 { 109 /* Assert the CLE (Command Latch Enable) line to the flash chip */
+1 -1
drivers/mtd/devices/doc2001plus.c
··· 118 /* DoC_Command: Send a flash command to the flash chip through the Flash 119 * command register. Need 2 Write Pipeline Terminates to complete send. 120 */ 121 - static inline void DoC_Command(void __iomem * docptr, unsigned char command, 122 unsigned char xtraflags) 123 { 124 WriteDOC(command, docptr, Mplus_FlashCmd);
··· 118 /* DoC_Command: Send a flash command to the flash chip through the Flash 119 * command register. Need 2 Write Pipeline Terminates to complete send. 120 */ 121 + static void DoC_Command(void __iomem * docptr, unsigned char command, 122 unsigned char xtraflags) 123 { 124 WriteDOC(command, docptr, Mplus_FlashCmd);
+1 -1
drivers/mtd/nand/diskonchip.c
··· 1506 return 1; 1507 } 1508 1509 - static inline int __init doc_probe(unsigned long physadr) 1510 { 1511 unsigned char ChipID; 1512 struct mtd_info *mtd;
··· 1506 return 1; 1507 } 1508 1509 + static int __init doc_probe(unsigned long physadr) 1510 { 1511 unsigned char ChipID; 1512 struct mtd_info *mtd;
+9 -9
drivers/net/e100.c
··· 592 (void)readb(&nic->csr->scb.status); 593 } 594 595 - static inline void e100_enable_irq(struct nic *nic) 596 { 597 unsigned long flags; 598 ··· 602 e100_write_flush(nic); 603 } 604 605 - static inline void e100_disable_irq(struct nic *nic) 606 { 607 unsigned long flags; 608 ··· 791 792 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 793 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */ 794 - static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 795 { 796 unsigned long flags; 797 unsigned int i; ··· 822 return err; 823 } 824 825 - static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb, 826 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) 827 { 828 struct cb *cb; ··· 1567 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD); 1568 } 1569 1570 - static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb, 1571 struct sk_buff *skb) 1572 { 1573 cb->command = nic->tx_command; ··· 1617 return 0; 1618 } 1619 1620 - static inline int e100_tx_clean(struct nic *nic) 1621 { 1622 struct cb *cb; 1623 int tx_cleaned = 0; ··· 1728 } 1729 1730 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) 1731 - static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) 1732 { 1733 if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN))) 1734 return -ENOMEM; ··· 1762 return 0; 1763 } 1764 1765 - static inline int e100_rx_indicate(struct nic *nic, struct rx *rx, 1766 unsigned int *work_done, unsigned int work_to_do) 1767 { 1768 struct sk_buff *skb = rx->skb; ··· 1822 return 0; 1823 } 1824 1825 - static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done, 1826 unsigned int work_to_do) 1827 { 1828 struct rx *rx;
··· 592 (void)readb(&nic->csr->scb.status); 593 } 594 595 + static void e100_enable_irq(struct nic *nic) 596 { 597 unsigned long flags; 598 ··· 602 e100_write_flush(nic); 603 } 604 605 + static void e100_disable_irq(struct nic *nic) 606 { 607 unsigned long flags; 608 ··· 791 792 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 793 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */ 794 + static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 795 { 796 unsigned long flags; 797 unsigned int i; ··· 822 return err; 823 } 824 825 + static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, 826 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) 827 { 828 struct cb *cb; ··· 1567 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD); 1568 } 1569 1570 + static void e100_xmit_prepare(struct nic *nic, struct cb *cb, 1571 struct sk_buff *skb) 1572 { 1573 cb->command = nic->tx_command; ··· 1617 return 0; 1618 } 1619 1620 + static int e100_tx_clean(struct nic *nic) 1621 { 1622 struct cb *cb; 1623 int tx_cleaned = 0; ··· 1728 } 1729 1730 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) 1731 + static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) 1732 { 1733 if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN))) 1734 return -ENOMEM; ··· 1762 return 0; 1763 } 1764 1765 + static int e100_rx_indicate(struct nic *nic, struct rx *rx, 1766 unsigned int *work_done, unsigned int work_to_do) 1767 { 1768 struct sk_buff *skb = rx->skb; ··· 1822 return 0; 1823 } 1824 1825 + static void e100_rx_clean(struct nic *nic, unsigned int *work_done, 1826 unsigned int work_to_do) 1827 { 1828 struct rx *rx;
+2 -2
drivers/net/sb1000.c
··· 94 const char* name); 95 static inline int card_wait_for_ready(const int ioaddr[], const char* name, 96 unsigned char in[]); 97 - static inline int card_send_command(const int ioaddr[], const char* name, 98 const unsigned char out[], unsigned char in[]); 99 100 /* SB1000 hardware routines to be used during frame rx interrupt */ ··· 309 } 310 311 /* Card Send Command (cannot be used during an interrupt) */ 312 - static inline int 313 card_send_command(const int ioaddr[], const char* name, 314 const unsigned char out[], unsigned char in[]) 315 {
··· 94 const char* name); 95 static inline int card_wait_for_ready(const int ioaddr[], const char* name, 96 unsigned char in[]); 97 + static int card_send_command(const int ioaddr[], const char* name, 98 const unsigned char out[], unsigned char in[]); 99 100 /* SB1000 hardware routines to be used during frame rx interrupt */ ··· 309 } 310 311 /* Card Send Command (cannot be used during an interrupt) */ 312 + static int 313 card_send_command(const int ioaddr[], const char* name, 314 const unsigned char out[], unsigned char in[]) 315 {
+5 -5
drivers/net/wireless/hostap/hostap_80211_rx.c
··· 435 } 436 437 438 - static inline int 439 hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb, 440 struct hostap_80211_rx_status *rx_stats, u16 type, 441 u16 stype) ··· 499 500 501 /* Called only as a tasklet (software IRQ) */ 502 - static inline struct net_device *prism2_rx_get_wds(local_info_t *local, 503 u8 *addr) 504 { 505 struct hostap_interface *iface = NULL; ··· 519 } 520 521 522 - static inline int 523 hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, 524 u16 fc, struct net_device **wds) 525 { ··· 615 616 617 /* Called only as a tasklet (software IRQ) */ 618 - static inline int 619 hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, 620 struct ieee80211_crypt_data *crypt) 621 { ··· 654 655 656 /* Called only as a tasklet (software IRQ) */ 657 - static inline int 658 hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, 659 int keyidx, struct ieee80211_crypt_data *crypt) 660 {
··· 435 } 436 437 438 + static int 439 hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb, 440 struct hostap_80211_rx_status *rx_stats, u16 type, 441 u16 stype) ··· 499 500 501 /* Called only as a tasklet (software IRQ) */ 502 + static struct net_device *prism2_rx_get_wds(local_info_t *local, 503 u8 *addr) 504 { 505 struct hostap_interface *iface = NULL; ··· 519 } 520 521 522 + static int 523 hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, 524 u16 fc, struct net_device **wds) 525 { ··· 615 616 617 /* Called only as a tasklet (software IRQ) */ 618 + static int 619 hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, 620 struct ieee80211_crypt_data *crypt) 621 { ··· 654 655 656 /* Called only as a tasklet (software IRQ) */ 657 + static int 658 hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, 659 int keyidx, struct ieee80211_crypt_data *crypt) 660 {
+4 -4
drivers/net/wireless/hostap/hostap_hw.c
··· 253 * @dev: pointer to net_device 254 * @entry: Prism2 command queue entry to be issued 255 */ 256 - static inline int hfa384x_cmd_issue(struct net_device *dev, 257 struct hostap_cmd_queue *entry) 258 { 259 struct hostap_interface *iface; ··· 743 } 744 745 746 - static inline int hfa384x_wait_offset(struct net_device *dev, u16 o_off) 747 { 748 int tries = HFA384X_BAP_BUSY_TIMEOUT; 749 int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY; ··· 1904 * and will try to get the correct fid eventually. */ 1905 #define EXTRA_FID_READ_TESTS 1906 1907 - static inline u16 prism2_read_fid_reg(struct net_device *dev, u16 reg) 1908 { 1909 #ifdef EXTRA_FID_READ_TESTS 1910 u16 val, val2, val3; ··· 2581 2582 2583 /* Called only from hardware IRQ */ 2584 - static inline void prism2_check_magic(local_info_t *local) 2585 { 2586 /* at least PCI Prism2.5 with bus mastering seems to sometimes 2587 * return 0x0000 in SWSUPPORT0 for unknown reason, but re-reading the
··· 253 * @dev: pointer to net_device 254 * @entry: Prism2 command queue entry to be issued 255 */ 256 + static int hfa384x_cmd_issue(struct net_device *dev, 257 struct hostap_cmd_queue *entry) 258 { 259 struct hostap_interface *iface; ··· 743 } 744 745 746 + static int hfa384x_wait_offset(struct net_device *dev, u16 o_off) 747 { 748 int tries = HFA384X_BAP_BUSY_TIMEOUT; 749 int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY; ··· 1904 * and will try to get the correct fid eventually. */ 1905 #define EXTRA_FID_READ_TESTS 1906 1907 + static u16 prism2_read_fid_reg(struct net_device *dev, u16 reg) 1908 { 1909 #ifdef EXTRA_FID_READ_TESTS 1910 u16 val, val2, val3; ··· 2581 2582 2583 /* Called only from hardware IRQ */ 2584 + static void prism2_check_magic(local_info_t *local) 2585 { 2586 /* at least PCI Prism2.5 with bus mastering seems to sometimes 2587 * return 0x0000 in SWSUPPORT0 for unknown reason, but re-reading the
+13 -13
drivers/net/wireless/ipw2100.c
··· 411 write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val); 412 } 413 414 - static inline void write_nic_memory(struct net_device *dev, u32 addr, u32 len, 415 const u8 * buf) 416 { 417 u32 aligned_addr; ··· 449 *buf); 450 } 451 452 - static inline void read_nic_memory(struct net_device *dev, u32 addr, u32 len, 453 u8 * buf) 454 { 455 u32 aligned_addr; ··· 657 658 #define MAX_RESET_BACKOFF 10 659 660 - static inline void schedule_reset(struct ipw2100_priv *priv) 661 { 662 unsigned long now = get_seconds(); 663 ··· 1130 write_register(priv->net_dev, IPW_REG_GPIO, reg); 1131 } 1132 1133 - static inline int rf_kill_active(struct ipw2100_priv *priv) 1134 { 1135 #define MAX_RF_KILL_CHECKS 5 1136 #define RF_KILL_CHECK_DELAY 40 ··· 2177 }; 2178 #endif 2179 2180 - static inline int ipw2100_alloc_skb(struct ipw2100_priv *priv, 2181 struct ipw2100_rx_packet *packet) 2182 { 2183 packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx)); ··· 2201 #define SEARCH_SNAPSHOT 1 2202 2203 #define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff)) 2204 - static inline int ipw2100_snapshot_alloc(struct ipw2100_priv *priv) 2205 { 2206 int i; 2207 if (priv->snapshot[0]) ··· 2221 return 1; 2222 } 2223 2224 - static inline void ipw2100_snapshot_free(struct ipw2100_priv *priv) 2225 { 2226 int i; 2227 if (!priv->snapshot[0]) ··· 2231 priv->snapshot[0] = NULL; 2232 } 2233 2234 - static inline u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf, 2235 size_t len, int mode) 2236 { 2237 u32 i, j; ··· 2288 static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH]; 2289 #endif 2290 2291 - static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) 2292 { 2293 #ifdef CONFIG_IPW2100_DEBUG_C3 2294 struct ipw2100_status *status = &priv->status_queue.drv[i]; ··· 2346 schedule_reset(priv); 2347 } 2348 2349 - static inline void isr_rx(struct ipw2100_priv *priv, int i, 2350 struct ieee80211_rx_stats *stats) 2351 { 2352 struct ipw2100_status *status = &priv->status_queue.drv[i]; ··· 2425 priv->rx_queue.drv[i].host_addr = packet->dma_addr; 2426 } 2427 2428 - static inline int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) 2429 { 2430 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2431 struct ipw2100_rx *u = priv->rx_buffers[i].rxp; ··· 2481 * The WRITE index is cached in the variable 'priv->rx_queue.next'. 2482 * 2483 */ 2484 - static inline void __ipw2100_rx_process(struct ipw2100_priv *priv) 2485 { 2486 struct ipw2100_bd_queue *rxq = &priv->rx_queue; 2487 struct ipw2100_status_queue *sq = &priv->status_queue; ··· 2634 * for use by future command and data packets. 2635 * 2636 */ 2637 - static inline int __ipw2100_tx_process(struct ipw2100_priv *priv) 2638 { 2639 struct ipw2100_bd_queue *txq = &priv->tx_queue; 2640 struct ipw2100_bd *tbd;
··· 411 write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val); 412 } 413 414 + static void write_nic_memory(struct net_device *dev, u32 addr, u32 len, 415 const u8 * buf) 416 { 417 u32 aligned_addr; ··· 449 *buf); 450 } 451 452 + static void read_nic_memory(struct net_device *dev, u32 addr, u32 len, 453 u8 * buf) 454 { 455 u32 aligned_addr; ··· 657 658 #define MAX_RESET_BACKOFF 10 659 660 + static void schedule_reset(struct ipw2100_priv *priv) 661 { 662 unsigned long now = get_seconds(); 663 ··· 1130 write_register(priv->net_dev, IPW_REG_GPIO, reg); 1131 } 1132 1133 + static int rf_kill_active(struct ipw2100_priv *priv) 1134 { 1135 #define MAX_RF_KILL_CHECKS 5 1136 #define RF_KILL_CHECK_DELAY 40 ··· 2177 }; 2178 #endif 2179 2180 + static int ipw2100_alloc_skb(struct ipw2100_priv *priv, 2181 struct ipw2100_rx_packet *packet) 2182 { 2183 packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx)); ··· 2201 #define SEARCH_SNAPSHOT 1 2202 2203 #define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff)) 2204 + static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv) 2205 { 2206 int i; 2207 if (priv->snapshot[0]) ··· 2221 return 1; 2222 } 2223 2224 + static void ipw2100_snapshot_free(struct ipw2100_priv *priv) 2225 { 2226 int i; 2227 if (!priv->snapshot[0]) ··· 2231 priv->snapshot[0] = NULL; 2232 } 2233 2234 + static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf, 2235 size_t len, int mode) 2236 { 2237 u32 i, j; ··· 2288 static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH]; 2289 #endif 2290 2291 + static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) 2292 { 2293 #ifdef CONFIG_IPW2100_DEBUG_C3 2294 struct ipw2100_status *status = &priv->status_queue.drv[i]; ··· 2346 schedule_reset(priv); 2347 } 2348 2349 + static void isr_rx(struct ipw2100_priv *priv, int i, 2350 struct ieee80211_rx_stats *stats) 2351 { 2352 struct ipw2100_status *status = &priv->status_queue.drv[i]; ··· 2425 priv->rx_queue.drv[i].host_addr = packet->dma_addr; 2426 } 2427 2428 + static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) 2429 { 2430 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2431 struct ipw2100_rx *u = priv->rx_buffers[i].rxp; ··· 2481 * The WRITE index is cached in the variable 'priv->rx_queue.next'. 2482 * 2483 */ 2484 + static void __ipw2100_rx_process(struct ipw2100_priv *priv) 2485 { 2486 struct ipw2100_bd_queue *rxq = &priv->rx_queue; 2487 struct ipw2100_status_queue *sq = &priv->status_queue; ··· 2634 * for use by future command and data packets. 2635 * 2636 */ 2637 + static int __ipw2100_tx_process(struct ipw2100_priv *priv) 2638 { 2639 struct ipw2100_bd_queue *txq = &priv->tx_queue; 2640 struct ipw2100_bd *tbd;
+21 -21
drivers/net/wireless/ipw2200.c
··· 813 up(&priv->sem); 814 } 815 816 - static inline void __ipw_led_activity_on(struct ipw_priv *priv) 817 { 818 u32 led; 819 ··· 1508 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO, 1509 show_direct_dword, store_direct_dword); 1510 1511 - static inline int rf_kill_active(struct ipw_priv *priv) 1512 { 1513 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) 1514 priv->status |= STATUS_RF_KILL_HW; ··· 2359 } 2360 2361 /* perform a chip select operation */ 2362 - static inline void eeprom_cs(struct ipw_priv *priv) 2363 { 2364 eeprom_write_reg(priv, 0); 2365 eeprom_write_reg(priv, EEPROM_BIT_CS); ··· 2368 } 2369 2370 /* perform a chip select operation */ 2371 - static inline void eeprom_disable_cs(struct ipw_priv *priv) 2372 { 2373 eeprom_write_reg(priv, EEPROM_BIT_CS); 2374 eeprom_write_reg(priv, 0); ··· 2475 IPW_DEBUG_TRACE("<<\n"); 2476 } 2477 2478 - static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 2479 { 2480 count >>= 2; 2481 if (!count) ··· 2772 return ipw_read32(priv, 0x90) == 0xd55555d5; 2773 } 2774 2775 - static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 2776 int timeout) 2777 { 2778 int i = 0; ··· 3150 3151 #define IPW_RX_BUF_SIZE (3000) 3152 3153 - static inline void ipw_rx_queue_reset(struct ipw_priv *priv, 3154 struct ipw_rx_queue *rxq) 3155 { 3156 unsigned long flags; ··· 3608 ipw_queue_tx_free(priv, &priv->txq[3]); 3609 } 3610 3611 - static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3612 { 3613 /* First 3 bytes are manufacturer */ 3614 bssid[0] = priv->mac_addr[0]; ··· 3622 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3623 } 3624 3625 - static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3626 { 3627 struct ipw_station_entry entry; 3628 int i; ··· 3655 return i; 3656 } 3657 3658 - static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3659 { 3660 int i; 3661 ··· 3794 memset(avg, 0, sizeof(*avg)); 3795 } 3796 3797 - static void inline 
average_add(struct average *avg, s16 val) 3798 { 3799 avg->sum -= avg->entries[avg->pos]; 3800 avg->sum += val; ··· 3805 } 3806 } 3807 3808 - static s16 inline average_value(struct average *avg) 3809 { 3810 if (!unlikely(avg->init)) { 3811 if (avg->pos) ··· 3847 3848 } 3849 3850 - static inline u32 ipw_get_max_rate(struct ipw_priv *priv) 3851 { 3852 u32 i = 0x80000000; 3853 u32 mask = priv->rates_mask; ··· 4087 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4088 * Above disassociate threshold, give up and stop scanning. 4089 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4090 - static inline void ipw_handle_missed_beacon(struct ipw_priv *priv, 4091 int missed_count) 4092 { 4093 priv->notif_missed_beacons = missed_count; ··· 4157 * Handle host notification packet. 4158 * Called from interrupt routine 4159 */ 4160 - static inline void ipw_rx_notification(struct ipw_priv *priv, 4161 struct ipw_rx_notification *notif) 4162 { 4163 notif->size = le16_to_cpu(notif->size); ··· 5095 return 1; 5096 } 5097 5098 - static inline void ipw_copy_rates(struct ipw_supported_rates *dest, 5099 const struct ipw_supported_rates *src) 5100 { 5101 u8 i; ··· 5856 #define ipw_debug_config(x) do {} while (0) 5857 #endif 5858 5859 - static inline void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 5860 { 5861 /* TODO: Verify that this works... 
*/ 5862 struct ipw_fixed_rate fr = { ··· 7634 } 7635 #endif 7636 7637 - static inline int is_network_packet(struct ipw_priv *priv, 7638 struct ieee80211_hdr_4addr *header) 7639 { 7640 /* Filter incoming packets to determine if they are targetted toward ··· 7672 7673 #define IPW_PACKET_RETRY_TIME HZ 7674 7675 - static inline int is_duplicate_packet(struct ipw_priv *priv, 7676 struct ieee80211_hdr_4addr *header) 7677 { 7678 u16 sc = le16_to_cpu(header->seq_ctl); ··· 9581 9582 /* net device stuff */ 9583 9584 - static inline void init_sys_config(struct ipw_sys_config *sys_config) 9585 { 9586 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 9587 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ ··· 9627 we need to heavily modify the ieee80211_skb_to_txb. 9628 */ 9629 9630 - static inline int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, 9631 int pri) 9632 { 9633 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
··· 813 up(&priv->sem); 814 } 815 816 + static void __ipw_led_activity_on(struct ipw_priv *priv) 817 { 818 u32 led; 819 ··· 1508 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO, 1509 show_direct_dword, store_direct_dword); 1510 1511 + static int rf_kill_active(struct ipw_priv *priv) 1512 { 1513 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) 1514 priv->status |= STATUS_RF_KILL_HW; ··· 2359 } 2360 2361 /* perform a chip select operation */ 2362 + static void eeprom_cs(struct ipw_priv *priv) 2363 { 2364 eeprom_write_reg(priv, 0); 2365 eeprom_write_reg(priv, EEPROM_BIT_CS); ··· 2368 } 2369 2370 /* perform a chip select operation */ 2371 + static void eeprom_disable_cs(struct ipw_priv *priv) 2372 { 2373 eeprom_write_reg(priv, EEPROM_BIT_CS); 2374 eeprom_write_reg(priv, 0); ··· 2475 IPW_DEBUG_TRACE("<<\n"); 2476 } 2477 2478 + static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 2479 { 2480 count >>= 2; 2481 if (!count) ··· 2772 return ipw_read32(priv, 0x90) == 0xd55555d5; 2773 } 2774 2775 + static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 2776 int timeout) 2777 { 2778 int i = 0; ··· 3150 3151 #define IPW_RX_BUF_SIZE (3000) 3152 3153 + static void ipw_rx_queue_reset(struct ipw_priv *priv, 3154 struct ipw_rx_queue *rxq) 3155 { 3156 unsigned long flags; ··· 3608 ipw_queue_tx_free(priv, &priv->txq[3]); 3609 } 3610 3611 + static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3612 { 3613 /* First 3 bytes are manufacturer */ 3614 bssid[0] = priv->mac_addr[0]; ··· 3622 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3623 } 3624 3625 + static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3626 { 3627 struct ipw_station_entry entry; 3628 int i; ··· 3655 return i; 3656 } 3657 3658 + static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3659 { 3660 int i; 3661 ··· 3794 memset(avg, 0, sizeof(*avg)); 3795 } 3796 3797 + static void average_add(struct average *avg, s16 val) 3798 { 3799 avg->sum -= 
avg->entries[avg->pos]; 3800 avg->sum += val; ··· 3805 } 3806 } 3807 3808 + static s16 average_value(struct average *avg) 3809 { 3810 if (!unlikely(avg->init)) { 3811 if (avg->pos) ··· 3847 3848 } 3849 3850 + static u32 ipw_get_max_rate(struct ipw_priv *priv) 3851 { 3852 u32 i = 0x80000000; 3853 u32 mask = priv->rates_mask; ··· 4087 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4088 * Above disassociate threshold, give up and stop scanning. 4089 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4090 + static void ipw_handle_missed_beacon(struct ipw_priv *priv, 4091 int missed_count) 4092 { 4093 priv->notif_missed_beacons = missed_count; ··· 4157 * Handle host notification packet. 4158 * Called from interrupt routine 4159 */ 4160 + static void ipw_rx_notification(struct ipw_priv *priv, 4161 struct ipw_rx_notification *notif) 4162 { 4163 notif->size = le16_to_cpu(notif->size); ··· 5095 return 1; 5096 } 5097 5098 + static void ipw_copy_rates(struct ipw_supported_rates *dest, 5099 const struct ipw_supported_rates *src) 5100 { 5101 u8 i; ··· 5856 #define ipw_debug_config(x) do {} while (0) 5857 #endif 5858 5859 + static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 5860 { 5861 /* TODO: Verify that this works... 
*/ 5862 struct ipw_fixed_rate fr = { ··· 7634 } 7635 #endif 7636 7637 + static int is_network_packet(struct ipw_priv *priv, 7638 struct ieee80211_hdr_4addr *header) 7639 { 7640 /* Filter incoming packets to determine if they are targetted toward ··· 7672 7673 #define IPW_PACKET_RETRY_TIME HZ 7674 7675 + static int is_duplicate_packet(struct ipw_priv *priv, 7676 struct ieee80211_hdr_4addr *header) 7677 { 7678 u16 sc = le16_to_cpu(header->seq_ctl); ··· 9581 9582 /* net device stuff */ 9583 9584 + static void init_sys_config(struct ipw_sys_config *sys_config) 9585 { 9586 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 9587 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ ··· 9627 we need to heavily modify the ieee80211_skb_to_txb. 9628 */ 9629 9630 + static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, 9631 int pri) 9632 { 9633 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
+19 -19
drivers/net/wireless/wavelan.c
··· 102 * Write to card's Host Adapter Command Register. Include a delay for 103 * those times when it is needed. 104 */ 105 - static inline void hacr_write_slow(unsigned long ioaddr, u16 hacr) 106 { 107 hacr_write(ioaddr, hacr); 108 /* delay might only be needed sometimes */ ··· 242 * The Windows drivers don't use the CRC, but the AP and the PtP tool 243 * depend on it. 244 */ 245 - static inline u16 psa_crc(u8 * psa, /* The PSA */ 246 int size) 247 { /* Number of short for CRC */ 248 int byte_cnt; /* Loop on the PSA */ ··· 310 /* 311 * Write 1 byte to the MMC. 312 */ 313 - static inline void mmc_out(unsigned long ioaddr, u16 o, u8 d) 314 { 315 int count = 0; 316 ··· 326 * Routine to write bytes to the Modem Management Controller. 327 * We start at the end because it is the way it should be! 328 */ 329 - static inline void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n) 330 { 331 o += n; 332 b += n; ··· 340 * Read a byte from the MMC. 341 * Optimised version for 1 byte, avoid using memory. 342 */ 343 - static inline u8 mmc_in(unsigned long ioaddr, u16 o) 344 { 345 int count = 0; 346 ··· 587 * Set channel attention bit and busy wait until command has 588 * completed, then acknowledge completion of the command. 589 */ 590 - static inline int wv_synchronous_cmd(struct net_device * dev, const char *str) 591 { 592 net_local *lp = (net_local *) dev->priv; 593 unsigned long ioaddr = dev->base_addr; ··· 633 * Configuration commands completion interrupt. 634 * Check if done, and if OK. 635 */ 636 - static inline int 637 wv_config_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp) 638 { 639 unsigned short mcs_addr; ··· 843 * wavelan_interrupt is not an option), so you may experience 844 * delays sometimes. 845 */ 846 - static inline void wv_82586_reconfig(struct net_device * dev) 847 { 848 net_local *lp = (net_local *) dev->priv; 849 unsigned long flags; ··· 1281 * This is the information which is displayed by the driver at startup. 
1282 * There are lots of flags for configuring it to your liking. 1283 */ 1284 - static inline void wv_init_info(struct net_device * dev) 1285 { 1286 short ioaddr = dev->base_addr; 1287 net_local *lp = (net_local *) dev->priv; ··· 1502 * It's a bit complicated and you don't really want to look into it. 1503 * (called in wavelan_ioctl) 1504 */ 1505 - static inline int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card */ 1506 iw_freq * frequency) 1507 { 1508 const int BAND_NUM = 10; /* Number of bands */ ··· 1677 /* 1678 * Give the list of available frequencies. 1679 */ 1680 - static inline int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */ 1681 iw_freq * list, /* List of frequencies to fill */ 1682 int max) 1683 { /* Maximum number of frequencies */ ··· 2489 * Note: if any errors occur, the packet is "dropped on the floor". 2490 * (called by wv_packet_rcv()) 2491 */ 2492 - static inline void 2493 wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) 2494 { 2495 net_local *lp = (net_local *) dev->priv; ··· 2585 * (called in wavelan_interrupt()). 2586 * Note : the spinlock is already grabbed for us. 2587 */ 2588 - static inline void wv_receive(struct net_device * dev) 2589 { 2590 unsigned long ioaddr = dev->base_addr; 2591 net_local *lp = (net_local *) dev->priv; ··· 2768 * 2769 * (called in wavelan_packet_xmit()) 2770 */ 2771 - static inline int wv_packet_write(struct net_device * dev, void *buf, short length) 2772 { 2773 net_local *lp = (net_local *) dev->priv; 2774 unsigned long ioaddr = dev->base_addr; ··· 2964 * Routine to initialize the Modem Management Controller. 2965 * (called by wv_hw_reset()) 2966 */ 2967 - static inline int wv_mmc_init(struct net_device * dev) 2968 { 2969 unsigned long ioaddr = dev->base_addr; 2970 net_local *lp = (net_local *) dev->priv; ··· 3136 * Start the receive unit. 
3137 * (called by wv_hw_reset()) 3138 */ 3139 - static inline int wv_ru_start(struct net_device * dev) 3140 { 3141 net_local *lp = (net_local *) dev->priv; 3142 unsigned long ioaddr = dev->base_addr; ··· 3228 * 3229 * (called by wv_hw_reset()) 3230 */ 3231 - static inline int wv_cu_start(struct net_device * dev) 3232 { 3233 net_local *lp = (net_local *) dev->priv; 3234 unsigned long ioaddr = dev->base_addr; ··· 3329 * 3330 * (called by wv_hw_reset()) 3331 */ 3332 - static inline int wv_82586_start(struct net_device * dev) 3333 { 3334 net_local *lp = (net_local *) dev->priv; 3335 unsigned long ioaddr = dev->base_addr; ··· 3641 * WaveLAN controller (i82586). 3642 * (called by wavelan_close()) 3643 */ 3644 - static inline void wv_82586_stop(struct net_device * dev) 3645 { 3646 net_local *lp = (net_local *) dev->priv; 3647 unsigned long ioaddr = dev->base_addr;
··· 102 * Write to card's Host Adapter Command Register. Include a delay for 103 * those times when it is needed. 104 */ 105 + static void hacr_write_slow(unsigned long ioaddr, u16 hacr) 106 { 107 hacr_write(ioaddr, hacr); 108 /* delay might only be needed sometimes */ ··· 242 * The Windows drivers don't use the CRC, but the AP and the PtP tool 243 * depend on it. 244 */ 245 + static u16 psa_crc(u8 * psa, /* The PSA */ 246 int size) 247 { /* Number of short for CRC */ 248 int byte_cnt; /* Loop on the PSA */ ··· 310 /* 311 * Write 1 byte to the MMC. 312 */ 313 + static void mmc_out(unsigned long ioaddr, u16 o, u8 d) 314 { 315 int count = 0; 316 ··· 326 * Routine to write bytes to the Modem Management Controller. 327 * We start at the end because it is the way it should be! 328 */ 329 + static void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n) 330 { 331 o += n; 332 b += n; ··· 340 * Read a byte from the MMC. 341 * Optimised version for 1 byte, avoid using memory. 342 */ 343 + static u8 mmc_in(unsigned long ioaddr, u16 o) 344 { 345 int count = 0; 346 ··· 587 * Set channel attention bit and busy wait until command has 588 * completed, then acknowledge completion of the command. 589 */ 590 + static int wv_synchronous_cmd(struct net_device * dev, const char *str) 591 { 592 net_local *lp = (net_local *) dev->priv; 593 unsigned long ioaddr = dev->base_addr; ··· 633 * Configuration commands completion interrupt. 634 * Check if done, and if OK. 635 */ 636 + static int 637 wv_config_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp) 638 { 639 unsigned short mcs_addr; ··· 843 * wavelan_interrupt is not an option), so you may experience 844 * delays sometimes. 845 */ 846 + static void wv_82586_reconfig(struct net_device * dev) 847 { 848 net_local *lp = (net_local *) dev->priv; 849 unsigned long flags; ··· 1281 * This is the information which is displayed by the driver at startup. 1282 * There are lots of flags for configuring it to your liking. 
1283 */ 1284 + static void wv_init_info(struct net_device * dev) 1285 { 1286 short ioaddr = dev->base_addr; 1287 net_local *lp = (net_local *) dev->priv; ··· 1502 * It's a bit complicated and you don't really want to look into it. 1503 * (called in wavelan_ioctl) 1504 */ 1505 + static int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card */ 1506 iw_freq * frequency) 1507 { 1508 const int BAND_NUM = 10; /* Number of bands */ ··· 1677 /* 1678 * Give the list of available frequencies. 1679 */ 1680 + static int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */ 1681 iw_freq * list, /* List of frequencies to fill */ 1682 int max) 1683 { /* Maximum number of frequencies */ ··· 2489 * Note: if any errors occur, the packet is "dropped on the floor". 2490 * (called by wv_packet_rcv()) 2491 */ 2492 + static void 2493 wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) 2494 { 2495 net_local *lp = (net_local *) dev->priv; ··· 2585 * (called in wavelan_interrupt()). 2586 * Note : the spinlock is already grabbed for us. 2587 */ 2588 + static void wv_receive(struct net_device * dev) 2589 { 2590 unsigned long ioaddr = dev->base_addr; 2591 net_local *lp = (net_local *) dev->priv; ··· 2768 * 2769 * (called in wavelan_packet_xmit()) 2770 */ 2771 + static int wv_packet_write(struct net_device * dev, void *buf, short length) 2772 { 2773 net_local *lp = (net_local *) dev->priv; 2774 unsigned long ioaddr = dev->base_addr; ··· 2964 * Routine to initialize the Modem Management Controller. 2965 * (called by wv_hw_reset()) 2966 */ 2967 + static int wv_mmc_init(struct net_device * dev) 2968 { 2969 unsigned long ioaddr = dev->base_addr; 2970 net_local *lp = (net_local *) dev->priv; ··· 3136 * Start the receive unit. 
3137 * (called by wv_hw_reset()) 3138 */ 3139 + static int wv_ru_start(struct net_device * dev) 3140 { 3141 net_local *lp = (net_local *) dev->priv; 3142 unsigned long ioaddr = dev->base_addr; ··· 3228 * 3229 * (called by wv_hw_reset()) 3230 */ 3231 + static int wv_cu_start(struct net_device * dev) 3232 { 3233 net_local *lp = (net_local *) dev->priv; 3234 unsigned long ioaddr = dev->base_addr; ··· 3329 * 3330 * (called by wv_hw_reset()) 3331 */ 3332 + static int wv_82586_start(struct net_device * dev) 3333 { 3334 net_local *lp = (net_local *) dev->priv; 3335 unsigned long ioaddr = dev->base_addr; ··· 3641 * WaveLAN controller (i82586). 3642 * (called by wavelan_close()) 3643 */ 3644 + static void wv_82586_stop(struct net_device * dev) 3645 { 3646 net_local *lp = (net_local *) dev->priv; 3647 unsigned long ioaddr = dev->base_addr;
+2 -2
drivers/scsi/aic7xxx_old.c
··· 1290 * 1291 ***************************************************************************/ 1292 1293 - static inline unsigned char 1294 aic_inb(struct aic7xxx_host *p, long port) 1295 { 1296 #ifdef MMAPIO ··· 1309 #endif 1310 } 1311 1312 - static inline void 1313 aic_outb(struct aic7xxx_host *p, unsigned char val, long port) 1314 { 1315 #ifdef MMAPIO
··· 1290 * 1291 ***************************************************************************/ 1292 1293 + static unsigned char 1294 aic_inb(struct aic7xxx_host *p, long port) 1295 { 1296 #ifdef MMAPIO ··· 1309 #endif 1310 } 1311 1312 + static void 1313 aic_outb(struct aic7xxx_host *p, unsigned char val, long port) 1314 { 1315 #ifdef MMAPIO
+1 -1
drivers/scsi/iscsi_tcp.c
··· 1418 ctask->digest_count = 4; 1419 } 1420 1421 - static inline int 1422 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1423 struct iscsi_buf *buf, uint32_t *digest, int final) 1424 {
··· 1418 ctask->digest_count = 4; 1419 } 1420 1421 + static int 1422 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1423 struct iscsi_buf *buf, uint32_t *digest, int final) 1424 {
+1 -1
drivers/scsi/libata-core.c
··· 1747 { ATA_SHIFT_PIO, XFER_PIO_0 }, 1748 }; 1749 1750 - static inline u8 base_from_shift(unsigned int shift) 1751 { 1752 int i; 1753
··· 1747 { ATA_SHIFT_PIO, XFER_PIO_0 }, 1748 }; 1749 1750 + static u8 base_from_shift(unsigned int shift) 1751 { 1752 int i; 1753
+5 -5
drivers/scsi/megaraid/megaraid_mbox.c
··· 1266 * return the scb from the head of the free list. NULL if there are none 1267 * available 1268 **/ 1269 - static inline scb_t * 1270 megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) 1271 { 1272 struct list_head *head = &adapter->kscb_pool; ··· 1329 * 1330 * prepare the scatter-gather list 1331 */ 1332 - static inline int 1333 megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) 1334 { 1335 struct scatterlist *sgl; ··· 1402 * 1403 * post the command to the controller if mailbox is availble. 1404 */ 1405 - static inline int 1406 mbox_post_cmd(adapter_t *adapter, scb_t *scb) 1407 { 1408 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); ··· 2070 * 2071 * Returns: 1 if the interrupt is valid, 0 otherwise 2072 */ 2073 - static inline int 2074 megaraid_ack_sequence(adapter_t *adapter) 2075 { 2076 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); ··· 2208 * 2209 * DMA sync if required. 2210 */ 2211 - static inline void 2212 megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb) 2213 { 2214 mbox_ccb_t *ccb;
··· 1266 * return the scb from the head of the free list. NULL if there are none 1267 * available 1268 **/ 1269 + static scb_t * 1270 megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) 1271 { 1272 struct list_head *head = &adapter->kscb_pool; ··· 1329 * 1330 * prepare the scatter-gather list 1331 */ 1332 + static int 1333 megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) 1334 { 1335 struct scatterlist *sgl; ··· 1402 * 1403 * post the command to the controller if mailbox is availble. 1404 */ 1405 + static int 1406 mbox_post_cmd(adapter_t *adapter, scb_t *scb) 1407 { 1408 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); ··· 2070 * 2071 * Returns: 1 if the interrupt is valid, 0 otherwise 2072 */ 2073 + static int 2074 megaraid_ack_sequence(adapter_t *adapter) 2075 { 2076 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); ··· 2208 * 2209 * DMA sync if required. 2210 */ 2211 + static void 2212 megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb) 2213 { 2214 mbox_ccb_t *ccb;
+9 -9
drivers/scsi/megaraid/megaraid_sas.c
··· 81 * 82 * Returns a free command from the pool 83 */ 84 - static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance 85 *instance) 86 { 87 unsigned long flags; ··· 263 * If successful, this function returns the number of SG elements. Otherwise, 264 * it returnes -1. 265 */ 266 - static inline int 267 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, 268 union megasas_sgl *mfi_sgl) 269 { ··· 311 * If successful, this function returns the number of SG elements. Otherwise, 312 * it returnes -1. 313 */ 314 - static inline int 315 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, 316 union megasas_sgl *mfi_sgl) 317 { ··· 360 * This function prepares CDB commands. These are typcially pass-through 361 * commands to the devices. 362 */ 363 - static inline int 364 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 365 struct megasas_cmd *cmd) 366 { ··· 441 * 442 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
443 */ 444 - static inline int 445 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 446 struct megasas_cmd *cmd) 447 { ··· 563 * @scp: SCSI command 564 * @frame_count: [OUT] Number of frames used to prepare this command 565 */ 566 - static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance 567 *instance, 568 struct scsi_cmnd *scp, 569 int *frame_count) ··· 914 * @instance: Adapter soft state 915 * @cmd: Completed command 916 */ 917 - static inline void 918 megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd) 919 { 920 dma_addr_t buf_h; ··· 958 * an alternate status (as in the case of aborted 959 * commands) 960 */ 961 - static inline void 962 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 963 u8 alt_status) 964 { ··· 1105 * SCSI mid-layer instead of the status 1106 * returned by the FW 1107 */ 1108 - static inline int 1109 megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) 1110 { 1111 u32 status;
··· 81 * 82 * Returns a free command from the pool 83 */ 84 + static struct megasas_cmd *megasas_get_cmd(struct megasas_instance 85 *instance) 86 { 87 unsigned long flags; ··· 263 * If successful, this function returns the number of SG elements. Otherwise, 264 * it returnes -1. 265 */ 266 + static int 267 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, 268 union megasas_sgl *mfi_sgl) 269 { ··· 311 * If successful, this function returns the number of SG elements. Otherwise, 312 * it returnes -1. 313 */ 314 + static int 315 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, 316 union megasas_sgl *mfi_sgl) 317 { ··· 360 * This function prepares CDB commands. These are typcially pass-through 361 * commands to the devices. 362 */ 363 + static int 364 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 365 struct megasas_cmd *cmd) 366 { ··· 441 * 442 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
443 */ 444 + static int 445 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 446 struct megasas_cmd *cmd) 447 { ··· 563 * @scp: SCSI command 564 * @frame_count: [OUT] Number of frames used to prepare this command 565 */ 566 + static struct megasas_cmd *megasas_build_cmd(struct megasas_instance 567 *instance, 568 struct scsi_cmnd *scp, 569 int *frame_count) ··· 914 * @instance: Adapter soft state 915 * @cmd: Completed command 916 */ 917 + static void 918 megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd) 919 { 920 dma_addr_t buf_h; ··· 958 * an alternate status (as in the case of aborted 959 * commands) 960 */ 961 + static void 962 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 963 u8 alt_status) 964 { ··· 1105 * SCSI mid-layer instead of the status 1106 * returned by the FW 1107 */ 1108 + static int 1109 megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) 1110 { 1111 u32 status;
+1 -1
drivers/scsi/sr.c
··· 151 return cd; 152 } 153 154 - static inline void scsi_cd_put(struct scsi_cd *cd) 155 { 156 struct scsi_device *sdev = cd->device; 157
··· 151 return cd; 152 } 153 154 + static void scsi_cd_put(struct scsi_cd *cd) 155 { 156 struct scsi_device *sdev = cd->device; 157
+2 -2
drivers/usb/atm/usbatm.c
··· 207 ** urbs ** 208 ************/ 209 210 - static inline struct urb *usbatm_pop_urb(struct usbatm_channel *channel) 211 { 212 struct urb *urb; 213 ··· 224 return urb; 225 } 226 227 - static inline int usbatm_submit_urb(struct urb *urb) 228 { 229 struct usbatm_channel *channel = urb->context; 230 int ret;
··· 207 ** urbs ** 208 ************/ 209 210 + static struct urb *usbatm_pop_urb(struct usbatm_channel *channel) 211 { 212 struct urb *urb; 213 ··· 224 return urb; 225 } 226 227 + static int usbatm_submit_urb(struct urb *urb) 228 { 229 struct usbatm_channel *channel = urb->context; 230 int ret;
+1 -1
drivers/video/matrox/matroxfb_maven.c
··· 968 return 0; 969 } 970 971 - static inline int maven_program_timming(struct maven_data* md, 972 const struct mavenregs* m) { 973 struct i2c_client* c = md->client; 974
··· 968 return 0; 969 } 970 971 + static int maven_program_timming(struct maven_data* md, 972 const struct mavenregs* m) { 973 struct i2c_client* c = md->client; 974
+16 -16
fs/9p/conv.c
··· 56 return buf->p > buf->ep; 57 } 58 59 - static inline int buf_check_size(struct cbuf *buf, int len) 60 { 61 if (buf->p + len > buf->ep) { 62 if (buf->p < buf->ep) { ··· 72 return 1; 73 } 74 75 - static inline void *buf_alloc(struct cbuf *buf, int len) 76 { 77 void *ret = NULL; 78 ··· 84 return ret; 85 } 86 87 - static inline void buf_put_int8(struct cbuf *buf, u8 val) 88 { 89 if (buf_check_size(buf, 1)) { 90 buf->p[0] = val; ··· 92 } 93 } 94 95 - static inline void buf_put_int16(struct cbuf *buf, u16 val) 96 { 97 if (buf_check_size(buf, 2)) { 98 *(__le16 *) buf->p = cpu_to_le16(val); ··· 100 } 101 } 102 103 - static inline void buf_put_int32(struct cbuf *buf, u32 val) 104 { 105 if (buf_check_size(buf, 4)) { 106 *(__le32 *)buf->p = cpu_to_le32(val); ··· 108 } 109 } 110 111 - static inline void buf_put_int64(struct cbuf *buf, u64 val) 112 { 113 if (buf_check_size(buf, 8)) { 114 *(__le64 *)buf->p = cpu_to_le64(val); ··· 116 } 117 } 118 119 - static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) 120 { 121 if (buf_check_size(buf, slen + 2)) { 122 buf_put_int16(buf, slen); ··· 130 buf_put_stringn(buf, s, strlen(s)); 131 } 132 133 - static inline u8 buf_get_int8(struct cbuf *buf) 134 { 135 u8 ret = 0; 136 ··· 142 return ret; 143 } 144 145 - static inline u16 buf_get_int16(struct cbuf *buf) 146 { 147 u16 ret = 0; 148 ··· 154 return ret; 155 } 156 157 - static inline u32 buf_get_int32(struct cbuf *buf) 158 { 159 u32 ret = 0; 160 ··· 166 return ret; 167 } 168 169 - static inline u64 buf_get_int64(struct cbuf *buf) 170 { 171 u64 ret = 0; 172 ··· 178 return ret; 179 } 180 181 - static inline void buf_get_str(struct cbuf *buf, struct v9fs_str *vstr) 182 { 183 vstr->len = buf_get_int16(buf); 184 if (!buf_check_overflow(buf) && buf_check_size(buf, vstr->len)) { ··· 190 } 191 } 192 193 - static inline void buf_get_qid(struct cbuf *bufp, struct v9fs_qid *qid) 194 { 195 qid->type = buf_get_int8(bufp); 196 qid->version = buf_get_int32(bufp); ··· 254 * 255 
*/ 256 257 - static inline void 258 buf_get_stat(struct cbuf *bufp, struct v9fs_stat *stat, int extended) 259 { 260 stat->size = buf_get_int16(bufp); ··· 427 buf_put_int64(bufp, val); 428 } 429 430 - static inline void 431 v9fs_put_str(struct cbuf *bufp, char *data, struct v9fs_str *str) 432 { 433 if (data) { ··· 441 buf_put_stringn(bufp, data, str->len); 442 } 443 444 - static inline int 445 v9fs_put_user_data(struct cbuf *bufp, const char __user * data, int count, 446 unsigned char **pdata) 447 {
··· 56 return buf->p > buf->ep; 57 } 58 59 + static int buf_check_size(struct cbuf *buf, int len) 60 { 61 if (buf->p + len > buf->ep) { 62 if (buf->p < buf->ep) { ··· 72 return 1; 73 } 74 75 + static void *buf_alloc(struct cbuf *buf, int len) 76 { 77 void *ret = NULL; 78 ··· 84 return ret; 85 } 86 87 + static void buf_put_int8(struct cbuf *buf, u8 val) 88 { 89 if (buf_check_size(buf, 1)) { 90 buf->p[0] = val; ··· 92 } 93 } 94 95 + static void buf_put_int16(struct cbuf *buf, u16 val) 96 { 97 if (buf_check_size(buf, 2)) { 98 *(__le16 *) buf->p = cpu_to_le16(val); ··· 100 } 101 } 102 103 + static void buf_put_int32(struct cbuf *buf, u32 val) 104 { 105 if (buf_check_size(buf, 4)) { 106 *(__le32 *)buf->p = cpu_to_le32(val); ··· 108 } 109 } 110 111 + static void buf_put_int64(struct cbuf *buf, u64 val) 112 { 113 if (buf_check_size(buf, 8)) { 114 *(__le64 *)buf->p = cpu_to_le64(val); ··· 116 } 117 } 118 119 + static void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) 120 { 121 if (buf_check_size(buf, slen + 2)) { 122 buf_put_int16(buf, slen); ··· 130 buf_put_stringn(buf, s, strlen(s)); 131 } 132 133 + static u8 buf_get_int8(struct cbuf *buf) 134 { 135 u8 ret = 0; 136 ··· 142 return ret; 143 } 144 145 + static u16 buf_get_int16(struct cbuf *buf) 146 { 147 u16 ret = 0; 148 ··· 154 return ret; 155 } 156 157 + static u32 buf_get_int32(struct cbuf *buf) 158 { 159 u32 ret = 0; 160 ··· 166 return ret; 167 } 168 169 + static u64 buf_get_int64(struct cbuf *buf) 170 { 171 u64 ret = 0; 172 ··· 178 return ret; 179 } 180 181 + static void buf_get_str(struct cbuf *buf, struct v9fs_str *vstr) 182 { 183 vstr->len = buf_get_int16(buf); 184 if (!buf_check_overflow(buf) && buf_check_size(buf, vstr->len)) { ··· 190 } 191 } 192 193 + static void buf_get_qid(struct cbuf *bufp, struct v9fs_qid *qid) 194 { 195 qid->type = buf_get_int8(bufp); 196 qid->version = buf_get_int32(bufp); ··· 254 * 255 */ 256 257 + static void 258 buf_get_stat(struct cbuf *bufp, struct v9fs_stat *stat, int 
extended) 259 { 260 stat->size = buf_get_int16(bufp); ··· 427 buf_put_int64(bufp, val); 428 } 429 430 + static void 431 v9fs_put_str(struct cbuf *bufp, char *data, struct v9fs_str *str) 432 { 433 if (data) { ··· 441 buf_put_stringn(bufp, data, str->len); 442 } 443 444 + static int 445 v9fs_put_user_data(struct cbuf *bufp, const char __user * data, int count, 446 unsigned char **pdata) 447 {
+2 -2
fs/binfmt_elf.c
··· 1218 if (!dump_seek(file, (off))) \ 1219 goto end_coredump; 1220 1221 - static inline void fill_elf_header(struct elfhdr *elf, int segs) 1222 { 1223 memcpy(elf->e_ident, ELFMAG, SELFMAG); 1224 elf->e_ident[EI_CLASS] = ELF_CLASS; ··· 1243 return; 1244 } 1245 1246 - static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset) 1247 { 1248 phdr->p_type = PT_NOTE; 1249 phdr->p_offset = offset;
··· 1218 if (!dump_seek(file, (off))) \ 1219 goto end_coredump; 1220 1221 + static void fill_elf_header(struct elfhdr *elf, int segs) 1222 { 1223 memcpy(elf->e_ident, ELFMAG, SELFMAG); 1224 elf->e_ident[EI_CLASS] = ELF_CLASS; ··· 1243 return; 1244 } 1245 1246 + static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset) 1247 { 1248 phdr->p_type = PT_NOTE; 1249 phdr->p_offset = offset;
+1 -1
fs/binfmt_misc.c
··· 264 return p - from; 265 } 266 267 - static inline char * check_special_flags (char * sfs, Node * e) 268 { 269 char * p = sfs; 270 int cont = 1;
··· 264 return p - from; 265 } 266 267 + static char * check_special_flags (char * sfs, Node * e) 268 { 269 char * p = sfs; 270 int cont = 1;
+2 -2
fs/bio.c
··· 123 bio_free(bio, fs_bio_set); 124 } 125 126 - inline void bio_init(struct bio *bio) 127 { 128 bio->bi_next = NULL; 129 bio->bi_bdev = NULL; ··· 253 * the actual data it points to. Reference count of returned 254 * bio will be one. 255 */ 256 - inline void __bio_clone(struct bio *bio, struct bio *bio_src) 257 { 258 request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); 259
··· 123 bio_free(bio, fs_bio_set); 124 } 125 126 + void bio_init(struct bio *bio) 127 { 128 bio->bi_next = NULL; 129 bio->bi_bdev = NULL; ··· 253 * the actual data it points to. Reference count of returned 254 * bio will be one. 255 */ 256 + void __bio_clone(struct bio *bio, struct bio *bio_src) 257 { 258 request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); 259
+3 -3
fs/buffer.c
··· 1165 * some of those buffers may be aliases of filesystem data. 1166 * grow_dev_page() will go BUG() if this happens. 1167 */ 1168 - static inline int 1169 grow_buffers(struct block_device *bdev, sector_t block, int size) 1170 { 1171 struct page *page; ··· 1391 /* 1392 * Look up the bh in this cpu's LRU. If it's there, move it to the head. 1393 */ 1394 - static inline struct buffer_head * 1395 lookup_bh_lru(struct block_device *bdev, sector_t block, int size) 1396 { 1397 struct buffer_head *ret = NULL; ··· 1541 /* 1542 * Called when truncating a buffer on a page completely. 1543 */ 1544 - static inline void discard_buffer(struct buffer_head * bh) 1545 { 1546 lock_buffer(bh); 1547 clear_buffer_dirty(bh);
··· 1165 * some of those buffers may be aliases of filesystem data. 1166 * grow_dev_page() will go BUG() if this happens. 1167 */ 1168 + static int 1169 grow_buffers(struct block_device *bdev, sector_t block, int size) 1170 { 1171 struct page *page; ··· 1391 /* 1392 * Look up the bh in this cpu's LRU. If it's there, move it to the head. 1393 */ 1394 + static struct buffer_head * 1395 lookup_bh_lru(struct block_device *bdev, sector_t block, int size) 1396 { 1397 struct buffer_head *ret = NULL; ··· 1541 /* 1542 * Called when truncating a buffer on a page completely. 1543 */ 1544 + static void discard_buffer(struct buffer_head * bh) 1545 { 1546 lock_buffer(bh); 1547 clear_buffer_dirty(bh);
+2 -2
fs/compat.c
··· 1537 * Ooo, nasty. We need here to frob 32-bit unsigned longs to 1538 * 64-bit unsigned longs. 1539 */ 1540 - static inline 1541 int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, 1542 unsigned long *fdset) 1543 { ··· 1570 return 0; 1571 } 1572 1573 - static inline 1574 void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, 1575 unsigned long *fdset) 1576 {
··· 1537 * Ooo, nasty. We need here to frob 32-bit unsigned longs to 1538 * 64-bit unsigned longs. 1539 */ 1540 + static 1541 int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, 1542 unsigned long *fdset) 1543 { ··· 1570 return 0; 1571 } 1572 1573 + static 1574 void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, 1575 unsigned long *fdset) 1576 {
+1 -1
fs/dcache.c
··· 94 * d_iput() operation if defined. 95 * Called with dcache_lock and per dentry lock held, drops both. 96 */ 97 - static inline void dentry_iput(struct dentry * dentry) 98 { 99 struct inode *inode = dentry->d_inode; 100 if (inode) {
··· 94 * d_iput() operation if defined. 95 * Called with dcache_lock and per dentry lock held, drops both. 96 */ 97 + static void dentry_iput(struct dentry * dentry) 98 { 99 struct inode *inode = dentry->d_inode; 100 if (inode) {
+3 -3
fs/exec.c
··· 575 * disturbing other processes. (Other processes might share the signal 576 * table via the CLONE_SIGHAND option to clone().) 577 */ 578 - static inline int de_thread(struct task_struct *tsk) 579 { 580 struct signal_struct *sig = tsk->signal; 581 struct sighand_struct *newsighand, *oldsighand = tsk->sighand; ··· 780 * so that a new one can be started 781 */ 782 783 - static inline void flush_old_files(struct files_struct * files) 784 { 785 long j = -1; 786 struct fdtable *fdt; ··· 964 965 EXPORT_SYMBOL(prepare_binprm); 966 967 - static inline int unsafe_exec(struct task_struct *p) 968 { 969 int unsafe = 0; 970 if (p->ptrace & PT_PTRACED) {
··· 575 * disturbing other processes. (Other processes might share the signal 576 * table via the CLONE_SIGHAND option to clone().) 577 */ 578 + static int de_thread(struct task_struct *tsk) 579 { 580 struct signal_struct *sig = tsk->signal; 581 struct sighand_struct *newsighand, *oldsighand = tsk->sighand; ··· 780 * so that a new one can be started 781 */ 782 783 + static void flush_old_files(struct files_struct * files) 784 { 785 long j = -1; 786 struct fdtable *fdt; ··· 964 965 EXPORT_SYMBOL(prepare_binprm); 966 967 + static int unsafe_exec(struct task_struct *p) 968 { 969 int unsafe = 0; 970 if (p->ptrace & PT_PTRACED) {
+1 -1
fs/fcntl.c
··· 36 spin_unlock(&files->file_lock); 37 } 38 39 - static inline int get_close_on_exec(unsigned int fd) 40 { 41 struct files_struct *files = current->files; 42 struct fdtable *fdt;
··· 36 spin_unlock(&files->file_lock); 37 } 38 39 + static int get_close_on_exec(unsigned int fd) 40 { 41 struct files_struct *files = current->files; 42 struct fdtable *fdt;
+1 -1
fs/jffs2/build.c
··· 47 ic = next_inode(&i, ic, (c))) 48 49 50 - static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, 51 struct jffs2_inode_cache *ic) 52 { 53 struct jffs2_full_dirent *fd;
··· 47 ic = next_inode(&i, ic, (c))) 48 49 50 + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, 51 struct jffs2_inode_cache *ic) 52 { 53 struct jffs2_full_dirent *fd;
+2 -2
fs/jffs2/nodelist.c
··· 134 /* 135 * Allocate and initializes a new fragment. 136 */ 137 - static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) 138 { 139 struct jffs2_node_frag *newfrag; 140 ··· 513 * 514 * Checks the node if we are in the checking stage. 515 */ 516 - static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn) 517 { 518 int ret; 519
··· 134 /* 135 * Allocate and initializes a new fragment. 136 */ 137 + static struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) 138 { 139 struct jffs2_node_frag *newfrag; 140 ··· 513 * 514 * Checks the node if we are in the checking stage. 515 */ 516 + static int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn) 517 { 518 int ret; 519
+3 -3
fs/lockd/xdr.c
··· 44 /* 45 * XDR functions for basic NLM types 46 */ 47 - static inline u32 *nlm_decode_cookie(u32 *p, struct nlm_cookie *c) 48 { 49 unsigned int len; 50 ··· 79 return p; 80 } 81 82 - static inline u32 * 83 nlm_decode_fh(u32 *p, struct nfs_fh *f) 84 { 85 unsigned int len; ··· 119 return xdr_encode_netobj(p, oh); 120 } 121 122 - static inline u32 * 123 nlm_decode_lock(u32 *p, struct nlm_lock *lock) 124 { 125 struct file_lock *fl = &lock->fl;
··· 44 /* 45 * XDR functions for basic NLM types 46 */ 47 + static u32 *nlm_decode_cookie(u32 *p, struct nlm_cookie *c) 48 { 49 unsigned int len; 50 ··· 79 return p; 80 } 81 82 + static u32 * 83 nlm_decode_fh(u32 *p, struct nfs_fh *f) 84 { 85 unsigned int len; ··· 119 return xdr_encode_netobj(p, oh); 120 } 121 122 + static u32 * 123 nlm_decode_lock(u32 *p, struct nlm_lock *lock) 124 { 125 struct file_lock *fl = &lock->fl;
+3 -3
fs/mbcache.c
··· 126 } 127 128 129 - static inline void 130 __mb_cache_entry_unhash(struct mb_cache_entry *ce) 131 { 132 int n; ··· 139 } 140 141 142 - static inline void 143 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask) 144 { 145 struct mb_cache *cache = ce->e_cache; ··· 158 } 159 160 161 - static inline void 162 __mb_cache_entry_release_unlock(struct mb_cache_entry *ce) 163 { 164 /* Wake up all processes queuing for this cache entry. */
··· 126 } 127 128 129 + static void 130 __mb_cache_entry_unhash(struct mb_cache_entry *ce) 131 { 132 int n; ··· 139 } 140 141 142 + static void 143 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask) 144 { 145 struct mb_cache *cache = ce->e_cache; ··· 158 } 159 160 161 + static void 162 __mb_cache_entry_release_unlock(struct mb_cache_entry *ce) 163 { 164 /* Wake up all processes queuing for this cache entry. */
+4 -4
fs/namei.c
··· 113 * POSIX.1 2.4: an empty pathname is invalid (ENOENT). 114 * PATH_MAX includes the nul terminator --RR. 115 */ 116 - static inline int do_getname(const char __user *filename, char *page) 117 { 118 int retval; 119 unsigned long len = PATH_MAX; ··· 396 * short-cut DAC fails, then call permission() to do more 397 * complete permission check. 398 */ 399 - static inline int exec_permission_lite(struct inode *inode, 400 struct nameidata *nd) 401 { 402 umode_t mode = inode->i_mode; ··· 1294 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by 1295 * nfs_async_unlink(). 1296 */ 1297 - static inline int may_delete(struct inode *dir,struct dentry *victim,int isdir) 1298 { 1299 int error; 1300 ··· 2315 return error; 2316 } 2317 2318 - static inline int do_rename(const char * oldname, const char * newname) 2319 { 2320 int error = 0; 2321 struct dentry * old_dir, * new_dir;
··· 113 * POSIX.1 2.4: an empty pathname is invalid (ENOENT). 114 * PATH_MAX includes the nul terminator --RR. 115 */ 116 + static int do_getname(const char __user *filename, char *page) 117 { 118 int retval; 119 unsigned long len = PATH_MAX; ··· 396 * short-cut DAC fails, then call permission() to do more 397 * complete permission check. 398 */ 399 + static int exec_permission_lite(struct inode *inode, 400 struct nameidata *nd) 401 { 402 umode_t mode = inode->i_mode; ··· 1294 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by 1295 * nfs_async_unlink(). 1296 */ 1297 + static int may_delete(struct inode *dir,struct dentry *victim,int isdir) 1298 { 1299 int error; 1300 ··· 2315 return error; 2316 } 2317 2318 + static int do_rename(const char * oldname, const char * newname) 2319 { 2320 int error = 0; 2321 struct dentry * old_dir, * new_dir;
+2 -2
fs/nfsd/nfsxdr.c
··· 37 /* 38 * XDR functions for basic NFS types 39 */ 40 - static inline u32 * 41 decode_fh(u32 *p, struct svc_fh *fhp) 42 { 43 fh_init(fhp, NFS_FHSIZE); ··· 151 return p; 152 } 153 154 - static inline u32 * 155 encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp, 156 struct kstat *stat) 157 {
··· 37 /* 38 * XDR functions for basic NFS types 39 */ 40 + static u32 * 41 decode_fh(u32 *p, struct svc_fh *fhp) 42 { 43 fh_init(fhp, NFS_FHSIZE); ··· 151 return p; 152 } 153 154 + static u32 * 155 encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp, 156 struct kstat *stat) 157 {
+2 -2
fs/pipe.c
··· 50 mutex_lock(PIPE_MUTEX(*inode)); 51 } 52 53 - static inline int 54 pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len) 55 { 56 unsigned long copy; ··· 70 return 0; 71 } 72 73 - static inline int 74 pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len) 75 { 76 unsigned long copy;
··· 50 mutex_lock(PIPE_MUTEX(*inode)); 51 } 52 53 + static int 54 pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len) 55 { 56 unsigned long copy; ··· 70 return 0; 71 } 72 73 + static int 74 pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len) 75 { 76 unsigned long copy;
+1 -1
kernel/cpuset.c
··· 1554 * when reading out p->cpuset, as we don't really care if it changes 1555 * on the next cycle, and we are not going to try to dereference it. 1556 */ 1557 - static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) 1558 { 1559 int n = 0; 1560 struct task_struct *g, *p;
··· 1554 * when reading out p->cpuset, as we don't really care if it changes 1555 * on the next cycle, and we are not going to try to dereference it. 1556 */ 1557 + static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) 1558 { 1559 int n = 0; 1560 struct task_struct *g, *p;
+5 -5
kernel/exit.c
··· 193 return retval; 194 } 195 196 - static inline int has_stopped_jobs(int pgrp) 197 { 198 int retval = 0; 199 struct task_struct *p; ··· 230 * 231 * NOTE that reparent_to_init() gives the caller full capabilities. 232 */ 233 - static inline void reparent_to_init(void) 234 { 235 write_lock_irq(&tasklist_lock); 236 ··· 369 370 EXPORT_SYMBOL(daemonize); 371 372 - static inline void close_files(struct files_struct * files) 373 { 374 int i, j; 375 struct fdtable *fdt; ··· 543 p->real_parent = reaper; 544 } 545 546 - static inline void reparent_thread(task_t *p, task_t *father, int traced) 547 { 548 /* We don't want people slaying init. */ 549 if (p->exit_signal != -1) ··· 607 * group, and if no such member exists, give it to 608 * the global child reaper process (ie "init") 609 */ 610 - static inline void forget_original_parent(struct task_struct * father, 611 struct list_head *to_release) 612 { 613 struct task_struct *p, *reaper = father;
··· 193 return retval; 194 } 195 196 + static int has_stopped_jobs(int pgrp) 197 { 198 int retval = 0; 199 struct task_struct *p; ··· 230 * 231 * NOTE that reparent_to_init() gives the caller full capabilities. 232 */ 233 + static void reparent_to_init(void) 234 { 235 write_lock_irq(&tasklist_lock); 236 ··· 369 370 EXPORT_SYMBOL(daemonize); 371 372 + static void close_files(struct files_struct * files) 373 { 374 int i, j; 375 struct fdtable *fdt; ··· 543 p->real_parent = reaper; 544 } 545 546 + static void reparent_thread(task_t *p, task_t *father, int traced) 547 { 548 /* We don't want people slaying init. */ 549 if (p->exit_signal != -1) ··· 607 * group, and if no such member exists, give it to 608 * the global child reaper process (ie "init") 609 */ 610 + static void forget_original_parent(struct task_struct * father, 611 struct list_head *to_release) 612 { 613 struct task_struct *p, *reaper = father;
+4 -4
kernel/posix-timers.c
··· 192 return do_sys_settimeofday(tp, NULL); 193 } 194 195 - static inline int common_timer_create(struct k_itimer *new_timer) 196 { 197 hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock); 198 new_timer->it.real.timer.data = new_timer; ··· 361 return ret; 362 } 363 364 - static inline struct task_struct * good_sigevent(sigevent_t * event) 365 { 366 struct task_struct *rtn = current->group_leader; 367 ··· 687 688 /* Set a POSIX.1b interval timer. */ 689 /* timr->it_lock is taken. */ 690 - static inline int 691 common_timer_set(struct k_itimer *timr, int flags, 692 struct itimerspec *new_setting, struct itimerspec *old_setting) 693 { ··· 829 /* 830 * return timer owned by the process, used by exit_itimers 831 */ 832 - static inline void itimer_delete(struct k_itimer *timer) 833 { 834 unsigned long flags; 835
··· 192 return do_sys_settimeofday(tp, NULL); 193 } 194 195 + static int common_timer_create(struct k_itimer *new_timer) 196 { 197 hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock); 198 new_timer->it.real.timer.data = new_timer; ··· 361 return ret; 362 } 363 364 + static struct task_struct * good_sigevent(sigevent_t * event) 365 { 366 struct task_struct *rtn = current->group_leader; 367 ··· 687 688 /* Set a POSIX.1b interval timer. */ 689 /* timr->it_lock is taken. */ 690 + static int 691 common_timer_set(struct k_itimer *timr, int flags, 692 struct itimerspec *new_setting, struct itimerspec *old_setting) 693 { ··· 829 /* 830 * return timer owned by the process, used by exit_itimers 831 */ 832 + static void itimer_delete(struct k_itimer *timer) 833 { 834 unsigned long flags; 835
+8 -8
kernel/sched.c
··· 521 * long it was waiting to run. We also note when it began so that we 522 * can keep stats on how long its timeslice is. 523 */ 524 - static inline void sched_info_arrive(task_t *t) 525 { 526 unsigned long now = jiffies, diff = 0; 527 struct runqueue *rq = task_rq(t); ··· 1007 * We want to under-estimate the load of migration sources, to 1008 * balance conservatively. 1009 */ 1010 - static inline unsigned long __source_load(int cpu, int type, enum idle_type idle) 1011 { 1012 runqueue_t *rq = cpu_rq(cpu); 1013 unsigned long running = rq->nr_running; ··· 1870 * pull_task - move a task from a remote runqueue to the local runqueue. 1871 * Both runqueues must be locked. 1872 */ 1873 - static inline 1874 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, 1875 runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) 1876 { ··· 1892 /* 1893 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 1894 */ 1895 - static inline 1896 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, 1897 struct sched_domain *sd, enum idle_type idle, 1898 int *all_pinned) ··· 2378 * idle_balance is called by schedule() if this_cpu is about to become 2379 * idle. Attempts to pull tasks from other CPUs. 
2380 */ 2381 - static inline void idle_balance(int this_cpu, runqueue_t *this_rq) 2382 { 2383 struct sched_domain *sd; 2384 ··· 2762 resched_task(rq->idle); 2763 } 2764 2765 - static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) 2766 { 2767 struct sched_domain *tmp, *sd = NULL; 2768 cpumask_t sibling_map; ··· 2816 return p->time_slice * (100 - sd->per_cpu_gain) / 100; 2817 } 2818 2819 - static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) 2820 { 2821 struct sched_domain *tmp, *sd = NULL; 2822 cpumask_t sibling_map; ··· 6008 * Detach sched domains from a group of cpus specified in cpu_map 6009 * These cpus will now be attached to the NULL domain 6010 */ 6011 - static inline void detach_destroy_domains(const cpumask_t *cpu_map) 6012 { 6013 int i; 6014
··· 521 * long it was waiting to run. We also note when it began so that we 522 * can keep stats on how long its timeslice is. 523 */ 524 + static void sched_info_arrive(task_t *t) 525 { 526 unsigned long now = jiffies, diff = 0; 527 struct runqueue *rq = task_rq(t); ··· 1007 * We want to under-estimate the load of migration sources, to 1008 * balance conservatively. 1009 */ 1010 + static unsigned long __source_load(int cpu, int type, enum idle_type idle) 1011 { 1012 runqueue_t *rq = cpu_rq(cpu); 1013 unsigned long running = rq->nr_running; ··· 1870 * pull_task - move a task from a remote runqueue to the local runqueue. 1871 * Both runqueues must be locked. 1872 */ 1873 + static 1874 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, 1875 runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) 1876 { ··· 1892 /* 1893 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 1894 */ 1895 + static 1896 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, 1897 struct sched_domain *sd, enum idle_type idle, 1898 int *all_pinned) ··· 2378 * idle_balance is called by schedule() if this_cpu is about to become 2379 * idle. Attempts to pull tasks from other CPUs. 2380 */ 2381 + static void idle_balance(int this_cpu, runqueue_t *this_rq) 2382 { 2383 struct sched_domain *sd; 2384 ··· 2762 resched_task(rq->idle); 2763 } 2764 2765 + static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) 2766 { 2767 struct sched_domain *tmp, *sd = NULL; 2768 cpumask_t sibling_map; ··· 2816 return p->time_slice * (100 - sd->per_cpu_gain) / 100; 2817 } 2818 2819 + static int dependent_sleeper(int this_cpu, runqueue_t *this_rq) 2820 { 2821 struct sched_domain *tmp, *sd = NULL; 2822 cpumask_t sibling_map; ··· 6008 * Detach sched domains from a group of cpus specified in cpu_map 6009 * These cpus will now be attached to the NULL domain 6010 */ 6011 + static void detach_destroy_domains(const cpumask_t *cpu_map) 6012 { 6013 int i; 6014
+2 -2
kernel/signal.c
··· 476 spin_unlock_irqrestore(&current->sighand->siglock, flags); 477 } 478 479 - static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info) 480 { 481 struct sigqueue *q, *first = NULL; 482 int still_pending = 0; ··· 1881 * We return zero if we still hold the siglock and should look 1882 * for another signal without checking group_stop_count again. 1883 */ 1884 - static inline int handle_group_stop(void) 1885 { 1886 int stop_count; 1887
··· 476 spin_unlock_irqrestore(&current->sighand->siglock, flags); 477 } 478 479 + static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) 480 { 481 struct sigqueue *q, *first = NULL; 482 int still_pending = 0; ··· 1881 * We return zero if we still hold the siglock and should look 1882 * for another signal without checking group_stop_count again. 1883 */ 1884 + static int handle_group_stop(void) 1885 { 1886 int stop_count; 1887
+1 -1
kernel/workqueue.c
··· 147 return ret; 148 } 149 150 - static inline void run_workqueue(struct cpu_workqueue_struct *cwq) 151 { 152 unsigned long flags; 153
··· 147 return ret; 148 } 149 150 + static void run_workqueue(struct cpu_workqueue_struct *cwq) 151 { 152 unsigned long flags; 153
+2 -2
net/ieee80211/ieee80211_module.c
··· 62 MODULE_AUTHOR(DRV_COPYRIGHT); 63 MODULE_LICENSE("GPL"); 64 65 - static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee) 66 { 67 if (ieee->networks) 68 return 0; ··· 90 ieee->networks = NULL; 91 } 92 93 - static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee) 94 { 95 int i; 96
··· 62 MODULE_AUTHOR(DRV_COPYRIGHT); 63 MODULE_LICENSE("GPL"); 64 65 + static int ieee80211_networks_allocate(struct ieee80211_device *ieee) 66 { 67 if (ieee->networks) 68 return 0; ··· 90 ieee->networks = NULL; 91 } 92 93 + static void ieee80211_networks_initialize(struct ieee80211_device *ieee) 94 { 95 int i; 96
+7 -7
net/ieee80211/ieee80211_rx.c
··· 35 36 #include <net/ieee80211.h> 37 38 - static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee, 39 struct sk_buff *skb, 40 struct ieee80211_rx_stats *rx_stats) 41 { ··· 165 * Responsible for handling management control frames 166 * 167 * Called by ieee80211_rx */ 168 - static inline int 169 ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, 170 struct ieee80211_rx_stats *rx_stats, u16 type, 171 u16 stype) ··· 266 } 267 268 /* Called only as a tasklet (software IRQ), by ieee80211_rx */ 269 - static inline int 270 ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, 271 struct ieee80211_crypt_data *crypt) 272 { ··· 297 } 298 299 /* Called only as a tasklet (software IRQ), by ieee80211_rx */ 300 - static inline int 301 ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, 302 struct sk_buff *skb, int keyidx, 303 struct ieee80211_crypt_data *crypt) ··· 1156 1157 /***************************************************/ 1158 1159 - static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response 1160 *beacon, 1161 struct ieee80211_network *network, 1162 struct ieee80211_rx_stats *stats) ··· 1235 !memcmp(src->ssid, dst->ssid, src->ssid_len)); 1236 } 1237 1238 - static inline void update_network(struct ieee80211_network *dst, 1239 struct ieee80211_network *src) 1240 { 1241 int qos_active; ··· 1294 return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); 1295 } 1296 1297 - static inline void ieee80211_process_probe_response(struct ieee80211_device 1298 *ieee, struct 1299 ieee80211_probe_response 1300 *beacon, struct ieee80211_rx_stats
··· 35 36 #include <net/ieee80211.h> 37 38 + static void ieee80211_monitor_rx(struct ieee80211_device *ieee, 39 struct sk_buff *skb, 40 struct ieee80211_rx_stats *rx_stats) 41 { ··· 165 * Responsible for handling management control frames 166 * 167 * Called by ieee80211_rx */ 168 + static int 169 ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, 170 struct ieee80211_rx_stats *rx_stats, u16 type, 171 u16 stype) ··· 266 } 267 268 /* Called only as a tasklet (software IRQ), by ieee80211_rx */ 269 + static int 270 ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, 271 struct ieee80211_crypt_data *crypt) 272 { ··· 297 } 298 299 /* Called only as a tasklet (software IRQ), by ieee80211_rx */ 300 + static int 301 ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, 302 struct sk_buff *skb, int keyidx, 303 struct ieee80211_crypt_data *crypt) ··· 1156 1157 /***************************************************/ 1158 1159 + static int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response 1160 *beacon, 1161 struct ieee80211_network *network, 1162 struct ieee80211_rx_stats *stats) ··· 1235 !memcmp(src->ssid, dst->ssid, src->ssid_len)); 1236 } 1237 1238 + static void update_network(struct ieee80211_network *dst, 1239 struct ieee80211_network *src) 1240 { 1241 int qos_active; ··· 1294 return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); 1295 } 1296 1297 + static void ieee80211_process_probe_response(struct ieee80211_device 1298 *ieee, struct 1299 ieee80211_probe_response 1300 *beacon, struct ieee80211_rx_stats
+2 -2
net/ieee80211/ieee80211_tx.c
··· 127 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; 128 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; 129 130 - static inline int ieee80211_copy_snap(u8 * data, u16 h_proto) 131 { 132 struct ieee80211_snap_hdr *snap; 133 u8 *oui; ··· 150 return SNAP_SIZE + sizeof(u16); 151 } 152 153 - static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, 154 struct sk_buff *frag, int hdr_len) 155 { 156 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
··· 127 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; 128 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; 129 130 + static int ieee80211_copy_snap(u8 * data, u16 h_proto) 131 { 132 struct ieee80211_snap_hdr *snap; 133 u8 *oui; ··· 150 return SNAP_SIZE + sizeof(u16); 151 } 152 153 + static int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, 154 struct sk_buff *frag, int hdr_len) 155 { 156 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
+1 -1
net/ieee80211/ieee80211_wx.c
··· 42 }; 43 44 #define MAX_CUSTOM_LEN 64 45 - static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, 46 char *start, char *stop, 47 struct ieee80211_network *network) 48 {
··· 42 }; 43 44 #define MAX_CUSTOM_LEN 64 45 + static char *ipw2100_translate_scan(struct ieee80211_device *ieee, 46 char *start, char *stop, 47 struct ieee80211_network *network) 48 {
+1 -1
net/netfilter/nfnetlink.c
··· 212 } 213 214 /* Process one complete nfnetlink message. */ 215 - static inline int nfnetlink_rcv_msg(struct sk_buff *skb, 216 struct nlmsghdr *nlh, int *errp) 217 { 218 struct nfnl_callback *nc;
··· 212 } 213 214 /* Process one complete nfnetlink message. */ 215 + static int nfnetlink_rcv_msg(struct sk_buff *skb, 216 struct nlmsghdr *nlh, int *errp) 217 { 218 struct nfnl_callback *nc;
+1 -1
security/selinux/hooks.c
··· 1019 has the same SID as the process. If av is zero, then 1020 access to the file is not checked, e.g. for cases 1021 where only the descriptor is affected like seek. */ 1022 - static inline int file_has_perm(struct task_struct *tsk, 1023 struct file *file, 1024 u32 av) 1025 {
··· 1019 has the same SID as the process. If av is zero, then 1020 access to the file is not checked, e.g. for cases 1021 where only the descriptor is affected like seek. */ 1022 + static int file_has_perm(struct task_struct *tsk, 1023 struct file *file, 1024 u32 av) 1025 {
+1 -1
sound/oss/esssolo1.c
··· 515 return 0; 516 } 517 518 - static inline int prog_dmabuf_dac(struct solo1_state *s) 519 { 520 unsigned long va; 521 int c;
··· 515 return 0; 516 } 517 518 + static int prog_dmabuf_dac(struct solo1_state *s) 519 { 520 unsigned long va; 521 int c;
+2 -2
sound/pci/es1968.c
··· 727 apu_data_set(chip, data); 728 } 729 730 - static inline void apu_set_register(struct es1968 *chip, u16 channel, u8 reg, u16 data) 731 { 732 unsigned long flags; 733 spin_lock_irqsave(&chip->reg_lock, flags); ··· 743 return __maestro_read(chip, IDR0_DATA_PORT); 744 } 745 746 - static inline u16 apu_get_register(struct es1968 *chip, u16 channel, u8 reg) 747 { 748 unsigned long flags; 749 u16 v;
··· 727 apu_data_set(chip, data); 728 } 729 730 + static void apu_set_register(struct es1968 *chip, u16 channel, u8 reg, u16 data) 731 { 732 unsigned long flags; 733 spin_lock_irqsave(&chip->reg_lock, flags); ··· 743 return __maestro_read(chip, IDR0_DATA_PORT); 744 } 745 746 + static u16 apu_get_register(struct es1968 *chip, u16 channel, u8 reg) 747 { 748 unsigned long flags; 749 u16 v;