···1+#ifndef __LINUX_TEXTSEARCH_H2+#define __LINUX_TEXTSEARCH_H3+4+#ifdef __KERNEL__5+6+#include <linux/types.h>7+#include <linux/list.h>8+#include <linux/kernel.h>9+#include <linux/module.h>10+#include <linux/err.h>11+12+struct ts_config;13+14+/**15+ * TS_AUTOLOAD - Automatically load textsearch modules when needed16+ */17+#define TS_AUTOLOAD 118+19+/**20+ * struct ts_state - search state21+ * @offset: offset for next match22+ * @cb: control buffer, for persistent variables of get_next_block()23+ */24+struct ts_state25+{26+ unsigned int offset;27+ char cb[40];28+};29+30+/**31+ * struct ts_ops - search module operations32+ * @name: name of search algorithm33+ * @init: initialization function to prepare a search34+ * @find: find the next occurrence of the pattern35+ * @destroy: destroy algorithm specific parts of a search configuration36+ * @get_pattern: return head of pattern37+ * @get_pattern_len: return length of pattern38+ * @owner: module reference to algorithm39+ */40+struct ts_ops41+{42+ const char *name;43+ struct ts_config * (*init)(const void *, unsigned int, int);44+ unsigned int (*find)(struct ts_config *,45+ struct ts_state *);46+ void (*destroy)(struct ts_config *);47+ void * (*get_pattern)(struct ts_config *);48+ unsigned int (*get_pattern_len)(struct ts_config *);49+ struct module *owner;50+ struct list_head list;51+};52+53+/**54+ * struct ts_config - search configuration55+ * @ops: operations of chosen algorithm56+ * @get_next_block: callback to fetch the next block to search in57+ * @finish: callback to finalize a search58+ */59+struct ts_config60+{61+ struct ts_ops *ops;62+63+ /**64+ * get_next_block - fetch next block of data65+ * @consumed: number of bytes consumed by the caller66+ * @dst: destination buffer67+ * @conf: search configuration68+ * @state: search state69+ *70+ * Called repeatedly until 0 is returned. Must assign the71+ * head of the next block of data to &*dst and return the length72+ * of the block or 0 if at the end. 
consumed == 0 indicates73+ * a new search. May store/read persistent values in state->cb.74+ */75+ unsigned int (*get_next_block)(unsigned int consumed,76+ const u8 **dst,77+ struct ts_config *conf,78+ struct ts_state *state);79+80+ /**81+ * finish - finalize/clean a series of get_next_block() calls82+ * @conf: search configuration83+ * @state: search state84+ *85+ * Called after the last use of get_next_block(), may be used86+ * to cleanup any leftovers.87+ */88+ void (*finish)(struct ts_config *conf,89+ struct ts_state *state);90+};91+92+/**93+ * textsearch_next - continue searching for a pattern94+ * @conf: search configuration95+ * @state: search state96+ *97+ * Continues a search looking for more occurrences of the pattern.98+ * textsearch_find() must be called to find the first occurrence99+ * in order to reset the state.100+ *101+ * Returns the position of the next occurrence of the pattern or102+ * UINT_MAX if no match was found.103+ */ 104+static inline unsigned int textsearch_next(struct ts_config *conf,105+ struct ts_state *state)106+{107+ unsigned int ret = conf->ops->find(conf, state);108+109+ if (conf->finish)110+ conf->finish(conf, state);111+112+ return ret;113+}114+115+/**116+ * textsearch_find - start searching for a pattern117+ * @conf: search configuration118+ * @state: search state119+ *120+ * Returns the position of first occurrence of the pattern or121+ * UINT_MAX if no match was found.122+ */ 123+static inline unsigned int textsearch_find(struct ts_config *conf,124+ struct ts_state *state)125+{126+ state->offset = 0;127+ return textsearch_next(conf, state);128+}129+130+/**131+ * textsearch_get_pattern - return head of the pattern132+ * @conf: search configuration133+ */134+static inline void *textsearch_get_pattern(struct ts_config *conf)135+{136+ return conf->ops->get_pattern(conf);137+}138+139+/**140+ * textsearch_get_pattern_len - return length of the pattern141+ * @conf: search configuration142+ */143+static inline unsigned int 
textsearch_get_pattern_len(struct ts_config *conf)144+{145+ return conf->ops->get_pattern_len(conf);146+}147+148+extern int textsearch_register(struct ts_ops *);149+extern int textsearch_unregister(struct ts_ops *);150+extern struct ts_config *textsearch_prepare(const char *, const void *,151+ unsigned int, int, int);152+extern void textsearch_destroy(struct ts_config *conf);153+extern unsigned int textsearch_find_continuous(struct ts_config *,154+ struct ts_state *,155+ const void *, unsigned int);156+157+158+#define TS_PRIV_ALIGNTO 8159+#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1))160+161+static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask)162+{163+ struct ts_config *conf;164+165+ conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);166+ if (conf == NULL)167+ return ERR_PTR(-ENOMEM);168+169+ memset(conf, 0, TS_PRIV_ALIGN(sizeof(*conf)) + payload);170+ return conf;171+}172+173+static inline void *ts_config_priv(struct ts_config *conf)174+{175+ return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config)));176+}177+178+#endif /* __KERNEL__ */179+180+#endif
···63config REED_SOLOMON_DEC1664 boolean6566+config TEXTSEARCH67+ boolean "Textsearch infrastructure"68+ default y69+ help70+ Say Y here if you want to provide a textsearch infrastructure71+ to other subsystems.7273+config TEXTSEARCH_KMP74+ depends on TEXTSEARCH75+ tristate "Knuth-Morris-Pratt"76+ help77+ Say Y here if you want to be able to search text using the78+ Knuth-Morris-Pratt textsearch algorithm.79+80+ To compile this code as a module, choose M here: the81+ module will be called ts_kmp.82+83+config TEXTSEARCH_FSM84+ depends on TEXTSEARCH85+ tristate "Finite state machine"86+ help87+ Say Y here if you want to be able to search text using a88+ naive finite state machine approach implementing a subset89+ of regular expressions.90+91+ To compile this code as a module, choose M here: the92+ module will be called ts_fsm.93+94+endmenu
···1+/*2+ * lib/textsearch.c Generic text search interface3+ *4+ * This program is free software; you can redistribute it and/or5+ * modify it under the terms of the GNU General Public License6+ * as published by the Free Software Foundation; either version7+ * 2 of the License, or (at your option) any later version.8+ *9+ * Authors: Thomas Graf <tgraf@suug.ch>10+ * Pablo Neira Ayuso <pablo@eurodev.net>11+ *12+ * ==========================================================================13+ *14+ * INTRODUCTION15+ *16+ * The textsearch infrastructure provides text searching facitilies for17+ * both linear and non-linear data. Individual search algorithms are18+ * implemented in modules and chosen by the user.19+ *20+ * ARCHITECTURE21+ *22+ * User23+ * +----------------+24+ * | finish()|<--------------(6)-----------------+25+ * |get_next_block()|<--------------(5)---------------+ |26+ * | | Algorithm | |27+ * | | +------------------------------+28+ * | | | init() find() destroy() |29+ * | | +------------------------------+30+ * | | Core API ^ ^ ^31+ * | | +---------------+ (2) (4) (8)32+ * | (1)|----->| prepare() |---+ | |33+ * | (3)|----->| find()/next() |-----------+ |34+ * | (7)|----->| destroy() |----------------------+35+ * +----------------+ +---------------+36+ * 37+ * (1) User configures a search by calling _prepare() specifying the38+ * search parameters such as the pattern and algorithm name.39+ * (2) Core requests the algorithm to allocate and initialize a search40+ * configuration according to the specified parameters.41+ * (3) User starts the search(es) by calling _find() or _next() to42+ * fetch subsequent occurrences. 
A state variable is provided43+ * to the algorithm to store persistent variables.44+ * (4) Core eventually resets the search offset and forwards the find()45+ * request to the algorithm.46+ * (5) Algorithm calls get_next_block() provided by the user continuously47+ * to fetch the data to be searched in block by block.48+ * (6) Algorithm invokes finish() after the last call to get_next_block49+ * to clean up any leftovers from get_next_block. (Optional)50+ * (7) User destroys the configuration by calling _destroy().51+ * (8) Core notifies the algorithm to destroy algorithm specific52+ * allocations. (Optional)53+ *54+ * USAGE55+ *56+ * Before a search can be performed, a configuration must be created57+ * by calling textsearch_prepare() specifying the searching algorithm and58+ * the pattern to look for. The returned configuration may then be used59+ * for an arbitrary amount of times and even in parallel as long as a60+ * separate struct ts_state variable is provided to every instance.61+ *62+ * The actual search is performed by either calling textsearch_find_-63+ * continuous() for linear data or by providing an own get_next_block()64+ * implementation and calling textsearch_find(). Both functions return65+ * the position of the first occurrence of the pattern or UINT_MAX if66+ * no match was found. 
Subsequent occurences can be found by calling67+ * textsearch_next() regardless of the linearity of the data.68+ *69+ * Once you're done using a configuration it must be given back via70+ * textsearch_destroy.71+ *72+ * EXAMPLE73+ *74+ * int pos;75+ * struct ts_config *conf;76+ * struct ts_state state;77+ * const char *pattern = "chicken";78+ * const char *example = "We dance the funky chicken";79+ *80+ * conf = textsearch_prepare("kmp", pattern, strlen(pattern),81+ * GFP_KERNEL, TS_AUTOLOAD);82+ * if (IS_ERR(conf)) {83+ * err = PTR_ERR(conf);84+ * goto errout;85+ * }86+ *87+ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));88+ * if (pos != UINT_MAX)89+ * panic("Oh my god, dancing chickens at %d\n", pos);90+ *91+ * textsearch_destroy(conf);92+ *93+ * ==========================================================================94+ */95+96+#include <linux/config.h>97+#include <linux/module.h>98+#include <linux/types.h>99+#include <linux/string.h>100+#include <linux/init.h>101+#include <linux/rcupdate.h>102+#include <linux/err.h>103+#include <linux/textsearch.h>104+105+static LIST_HEAD(ts_ops);106+static DEFINE_SPINLOCK(ts_mod_lock);107+108+static inline struct ts_ops *lookup_ts_algo(const char *name)109+{110+ struct ts_ops *o;111+112+ rcu_read_lock();113+ list_for_each_entry_rcu(o, &ts_ops, list) {114+ if (!strcmp(name, o->name)) {115+ if (!try_module_get(o->owner))116+ o = NULL;117+ rcu_read_unlock();118+ return o;119+ }120+ }121+ rcu_read_unlock();122+123+ return NULL;124+}125+126+/**127+ * textsearch_register - register a textsearch module128+ * @ops: operations lookup table129+ *130+ * This function must be called by textsearch modules to announce131+ * their presence. 
The specified &@ops must have %name set to a132+ * unique identifier and the callbacks find(), init(), get_pattern(),133+ * and get_pattern_len() must be implemented.134+ *135+ * Returns 0 or -EEXIST if another module has already registered136+ * with the same name.137+ */138+int textsearch_register(struct ts_ops *ops)139+{140+ int err = -EEXIST;141+ struct ts_ops *o;142+143+ if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||144+ ops->get_pattern == NULL || ops->get_pattern_len == NULL)145+ return -EINVAL;146+147+ spin_lock(&ts_mod_lock);148+ list_for_each_entry(o, &ts_ops, list) {149+ if (!strcmp(ops->name, o->name))150+ goto errout;151+ }152+153+ list_add_tail_rcu(&ops->list, &ts_ops);154+ err = 0;155+errout:156+ spin_unlock(&ts_mod_lock);157+ return err;158+}159+160+/**161+ * textsearch_unregister - unregister a textsearch module162+ * @ops: operations lookup table163+ *164+ * This function must be called by textsearch modules to announce165+ * their disappearance, for example when the module gets unloaded.166+ * The &ops parameter must be the same as the one during the167+ * registration.168+ *169+ * Returns 0 on success or -ENOENT if no matching textsearch170+ * registration was found.171+ */172+int textsearch_unregister(struct ts_ops *ops)173+{174+ int err = 0;175+ struct ts_ops *o;176+177+ spin_lock(&ts_mod_lock);178+ list_for_each_entry(o, &ts_ops, list) {179+ if (o == ops) {180+ list_del_rcu(&o->list);181+ goto out;182+ }183+ }184+185+ err = -ENOENT;186+out:187+ spin_unlock(&ts_mod_lock);188+ return err;189+}190+191+struct ts_linear_state192+{193+ unsigned int len;194+ const void *data;195+};196+197+static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,198+ struct ts_config *conf,199+ struct ts_state *state)200+{201+ struct ts_linear_state *st = (struct ts_linear_state *) state->cb;202+203+ if (likely(consumed < st->len)) {204+ *dst = st->data + consumed;205+ return st->len - consumed;206+ }207+208+ return 
0;209+}210+211+/**212+ * textsearch_find_continuous - search a pattern in continuous/linear data213+ * @conf: search configuration214+ * @state: search state215+ * @data: data to search in216+ * @len: length of data217+ *218+ * A simplified version of textsearch_find() for continuous/linear data.219+ * Call textsearch_next() to retrieve subsequent matches.220+ *221+ * Returns the position of first occurrence of the pattern or222+ * UINT_MAX if no occurrence was found.223+ */ 224+unsigned int textsearch_find_continuous(struct ts_config *conf,225+ struct ts_state *state,226+ const void *data, unsigned int len)227+{228+ struct ts_linear_state *st = (struct ts_linear_state *) state->cb;229+230+ conf->get_next_block = get_linear_data;231+ st->data = data;232+ st->len = len;233+234+ return textsearch_find(conf, state);235+}236+237+/**238+ * textsearch_prepare - Prepare a search239+ * @algo: name of search algorithm240+ * @pattern: pattern data241+ * @len: length of pattern242+ * @gfp_mask: allocation mask243+ * @flags: search flags244+ *245+ * Looks up the search algorithm module and creates a new textsearch246+ * configuration for the specified pattern. Upon completion all247+ * necessary refcnts are held and the configuration must be put back248+ * using textsearch_destroy() after usage.249+ *250+ * Note: The format of the pattern may not be compatible between251+ * the various search algorithms.252+ *253+ * Returns a new textsearch configuration according to the specified254+ * parameters or an ERR_PTR().255+ */256+struct ts_config *textsearch_prepare(const char *algo, const void *pattern,257+ unsigned int len, int gfp_mask, int flags)258+{259+ int err = -ENOENT;260+ struct ts_config *conf;261+ struct ts_ops *ops;262+263+ ops = lookup_ts_algo(algo);264+#ifdef CONFIG_KMOD265+ /*266+ * Why not always autoload you may ask. 
Some users are267+ * in a situation where requesting a module may deadlock,268+ * especially when the module is located on a NFS mount.269+ */270+ if (ops == NULL && flags & TS_AUTOLOAD) {271+ request_module("ts_%s", algo);272+ ops = lookup_ts_algo(algo);273+ }274+#endif275+276+ if (ops == NULL)277+ goto errout;278+279+ conf = ops->init(pattern, len, gfp_mask);280+ if (IS_ERR(conf)) {281+ err = PTR_ERR(conf);282+ goto errout;283+ }284+285+ conf->ops = ops;286+ return conf;287+288+errout:289+ if (ops)290+ module_put(ops->owner);291+292+ return ERR_PTR(err);293+}294+295+/**296+ * textsearch_destroy - destroy a search configuration297+ * @conf: search configuration298+ *299+ * Releases all references of the configuration and frees300+ * up the memory.301+ */302+void textsearch_destroy(struct ts_config *conf)303+{304+ if (conf->ops) {305+ if (conf->ops->destroy)306+ conf->ops->destroy(conf);307+ module_put(conf->ops->owner);308+ }309+310+ kfree(conf);311+}312+313+EXPORT_SYMBOL(textsearch_register);314+EXPORT_SYMBOL(textsearch_unregister);315+EXPORT_SYMBOL(textsearch_prepare);316+EXPORT_SYMBOL(textsearch_find_continuous);317+EXPORT_SYMBOL(textsearch_destroy);
···1+/*2+ * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation3+ *4+ * This program is free software; you can redistribute it and/or5+ * modify it under the terms of the GNU General Public License6+ * as published by the Free Software Foundation; either version7+ * 2 of the License, or (at your option) any later version.8+ *9+ * Authors: Thomas Graf <tgraf@suug.ch>10+ *11+ * ==========================================================================12+ * 13+ * Implements a linear-time string-matching algorithm due to Knuth,14+ * Morris, and Pratt [1]. Their algorithm avoids the explicit15+ * computation of the transition function DELTA altogether. Its16+ * matching time is O(n), for n being length(text), using just an17+ * auxiliary function PI[1..m], for m being length(pattern),18+ * precomputed from the pattern in time O(m). The array PI allows19+ * the transition function DELTA to be computed efficiently20+ * "on the fly" as needed. Roughly speaking, for any state21+ * "q" = 0,1,...,m and any character "a" in SIGMA, the value22+ * PI["q"] contains the information that is independent of "a" and23+ * is needed to compute DELTA("q", "a") [2]. 
Since the array PI24+ * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we25+ * save a factor of |SIGMA| in the preprocessing time by computing26+ * PI rather than DELTA.27+ *28+ * [1] Cormen, Leiserson, Rivest, Stein29+ * Introduction to Algorithms, 2nd Edition, MIT Press30+ * [2] See finite automata theory31+ */32+33+#include <linux/config.h>34+#include <linux/module.h>35+#include <linux/types.h>36+#include <linux/string.h>37+#include <linux/textsearch.h>38+39+struct ts_kmp40+{41+ u8 * pattern;42+ unsigned int pattern_len;43+ unsigned int prefix_tbl[0];44+};45+46+static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)47+{48+ struct ts_kmp *kmp = ts_config_priv(conf);49+ unsigned int i, q = 0, text_len, consumed = state->offset;50+ const u8 *text;51+52+ for (;;) {53+ text_len = conf->get_next_block(consumed, &text, conf, state);54+55+ if (unlikely(text_len == 0))56+ break;57+58+ for (i = 0; i < text_len; i++) {59+ while (q > 0 && kmp->pattern[q] != text[i])60+ q = kmp->prefix_tbl[q - 1];61+ if (kmp->pattern[q] == text[i])62+ q++;63+ if (unlikely(q == kmp->pattern_len)) {64+ state->offset = consumed + i + 1;65+ return state->offset - kmp->pattern_len;66+ }67+ }68+69+ consumed += text_len;70+ }71+72+ return UINT_MAX;73+}74+75+static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,76+ unsigned int *prefix_tbl)77+{78+ unsigned int k, q;79+80+ for (k = 0, q = 1; q < len; q++) {81+ while (k > 0 && pattern[k] != pattern[q])82+ k = prefix_tbl[k-1];83+ if (pattern[k] == pattern[q])84+ k++;85+ prefix_tbl[q] = k;86+ }87+}88+89+static struct ts_config *kmp_init(const void *pattern, unsigned int len,90+ int gfp_mask)91+{92+ struct ts_config *conf;93+ struct ts_kmp *kmp;94+ unsigned int prefix_tbl_len = len * sizeof(unsigned int);95+ size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;96+97+ conf = alloc_ts_config(priv_size, gfp_mask);98+ if (IS_ERR(conf))99+ return conf;100+101+ kmp = ts_config_priv(conf);102+ 
kmp->pattern_len = len;103+ compute_prefix_tbl(pattern, len, kmp->prefix_tbl);104+ kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;105+ memcpy(kmp->pattern, pattern, len);106+107+ return conf;108+}109+110+static void *kmp_get_pattern(struct ts_config *conf)111+{112+ struct ts_kmp *kmp = ts_config_priv(conf);113+ return kmp->pattern;114+}115+116+static unsigned int kmp_get_pattern_len(struct ts_config *conf)117+{118+ struct ts_kmp *kmp = ts_config_priv(conf);119+ return kmp->pattern_len;120+}121+122+static struct ts_ops kmp_ops = {123+ .name = "kmp",124+ .find = kmp_find,125+ .init = kmp_init,126+ .get_pattern = kmp_get_pattern,127+ .get_pattern_len = kmp_get_pattern_len,128+ .owner = THIS_MODULE,129+ .list = LIST_HEAD_INIT(kmp_ops.list)130+};131+132+static int __init init_kmp(void)133+{134+ return textsearch_register(&kmp_ops);135+}136+137+static void __exit exit_kmp(void)138+{139+ textsearch_unregister(&kmp_ops);140+}141+142+MODULE_LICENSE("GPL");143+144+module_init(init_kmp);145+module_exit(exit_kmp);
+8-117
net/core/dev.c
···115#endif /* CONFIG_NET_RADIO */116#include <asm/current.h>117118-/* This define, if set, will randomly drop a packet when congestion119- * is more than moderate. It helps fairness in the multi-interface120- * case when one of them is a hog, but it kills performance for the121- * single interface case so it is off now by default.122- */123-#undef RAND_LIE124-125-/* Setting this will sample the queue lengths and thus congestion126- * via a timer instead of as each packet is received.127- */128-#undef OFFLINE_SAMPLE129-130/*131 * The list of packet types we will receive (as opposed to discard)132 * and the routines to invoke.···146static DEFINE_SPINLOCK(ptype_lock);147static struct list_head ptype_base[16]; /* 16 way hashed list */148static struct list_head ptype_all; /* Taps */149-150-#ifdef OFFLINE_SAMPLE151-static void sample_queue(unsigned long dummy);152-static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);153-#endif154155/*156 * The @dev_base list is protected by @dev_base_lock and the rtln···198 * Device drivers call our routines to queue packets here. 
We empty the199 * queue in the local softnet handler.200 */201-DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };202203#ifdef CONFIG_SYSFS204extern int netdev_sysfs_init(void);···1346 Receiver routines1347 =======================================================================*/13481349-int netdev_max_backlog = 300;01350int weight_p = 64; /* old backlog weight */1351-/* These numbers are selected based on intuition and some1352- * experimentatiom, if you have more scientific way of doing this1353- * please go ahead and fix things.1354- */1355-int no_cong_thresh = 10;1356-int no_cong = 20;1357-int lo_cong = 100;1358-int mod_cong = 290;13591360DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };1361-1362-1363-static void get_sample_stats(int cpu)1364-{1365-#ifdef RAND_LIE1366- unsigned long rd;1367- int rq;1368-#endif1369- struct softnet_data *sd = &per_cpu(softnet_data, cpu);1370- int blog = sd->input_pkt_queue.qlen;1371- int avg_blog = sd->avg_blog;1372-1373- avg_blog = (avg_blog >> 1) + (blog >> 1);1374-1375- if (avg_blog > mod_cong) {1376- /* Above moderate congestion levels. 
*/1377- sd->cng_level = NET_RX_CN_HIGH;1378-#ifdef RAND_LIE1379- rd = net_random();1380- rq = rd % netdev_max_backlog;1381- if (rq < avg_blog) /* unlucky bastard */1382- sd->cng_level = NET_RX_DROP;1383-#endif1384- } else if (avg_blog > lo_cong) {1385- sd->cng_level = NET_RX_CN_MOD;1386-#ifdef RAND_LIE1387- rd = net_random();1388- rq = rd % netdev_max_backlog;1389- if (rq < avg_blog) /* unlucky bastard */1390- sd->cng_level = NET_RX_CN_HIGH;1391-#endif1392- } else if (avg_blog > no_cong)1393- sd->cng_level = NET_RX_CN_LOW;1394- else /* no congestion */1395- sd->cng_level = NET_RX_SUCCESS;1396-1397- sd->avg_blog = avg_blog;1398-}1399-1400-#ifdef OFFLINE_SAMPLE1401-static void sample_queue(unsigned long dummy)1402-{1403-/* 10 ms 0r 1ms -- i don't care -- JHS */1404- int next_tick = 1;1405- int cpu = smp_processor_id();1406-1407- get_sample_stats(cpu);1408- next_tick += jiffies;1409- mod_timer(&samp_timer, next_tick);1410-}1411-#endif141214131414/**···13731374int netif_rx(struct sk_buff *skb)1375{1376- int this_cpu;1377 struct softnet_data *queue;1378 unsigned long flags;1379···1388 * short when CPU is congested, but is still operating.1389 */1390 local_irq_save(flags);1391- this_cpu = smp_processor_id();1392 queue = &__get_cpu_var(softnet_data);13931394 __get_cpu_var(netdev_rx_stat).total++;1395 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {1396 if (queue->input_pkt_queue.qlen) {1397- if (queue->throttle)1398- goto drop;1399-1400enqueue:1401 dev_hold(skb->dev);1402 __skb_queue_tail(&queue->input_pkt_queue, skb);1403-#ifndef OFFLINE_SAMPLE1404- get_sample_stats(this_cpu);1405-#endif1406 local_irq_restore(flags);1407- return queue->cng_level;1408 }1409-1410- if (queue->throttle)1411- queue->throttle = 0;14121413 netif_rx_schedule(&queue->backlog_dev);1414 goto enqueue;1415 }14161417- if (!queue->throttle) {1418- queue->throttle = 1;1419- __get_cpu_var(netdev_rx_stat).throttled++;1420- }1421-1422-drop:1423 __get_cpu_var(netdev_rx_stat).dropped++;1424 
local_irq_restore(flags);1425···1688 smp_mb__before_clear_bit();1689 netif_poll_enable(backlog_dev);16901691- if (queue->throttle)1692- queue->throttle = 0;1693 local_irq_enable();1694 return 0;1695}···1696{1697 struct softnet_data *queue = &__get_cpu_var(softnet_data);1698 unsigned long start_time = jiffies;1699- int budget = netdev_max_backlog;1700-17011702 local_irq_disable();1703···1960 struct netif_rx_stats *s = v;19611962 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",1963- s->total, s->dropped, s->time_squeeze, s->throttled,1964- s->fastroute_hit, s->fastroute_success, s->fastroute_defer,1965- s->fastroute_deferred_out,1966-#if 01967- s->fastroute_latency_reduction1968-#else1969- s->cpu_collision1970-#endif1971- );1972 return 0;1973}1974···32043205 queue = &per_cpu(softnet_data, i);3206 skb_queue_head_init(&queue->input_pkt_queue);3207- queue->throttle = 0;3208- queue->cng_level = 0;3209- queue->avg_blog = 10; /* arbitrary non-zero */3210 queue->completion_queue = NULL;3211 INIT_LIST_HEAD(&queue->poll_list);3212 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);···3211 queue->backlog_dev.poll = process_backlog;3212 atomic_set(&queue->backlog_dev.refcnt, 1);3213 }3214-3215-#ifdef OFFLINE_SAMPLE3216- samp_timer.expires = jiffies + (10 * HZ);3217- add_timer(&samp_timer);3218-#endif32193220 dev_boot_phase = 0;3221
···115#endif /* CONFIG_NET_RADIO */116#include <asm/current.h>117000000000000118/*119 * The list of packet types we will receive (as opposed to discard)120 * and the routines to invoke.···158static DEFINE_SPINLOCK(ptype_lock);159static struct list_head ptype_base[16]; /* 16 way hashed list */160static struct list_head ptype_all; /* Taps */00000161162/*163 * The @dev_base list is protected by @dev_base_lock and the rtln···215 * Device drivers call our routines to queue packets here. We empty the216 * queue in the local softnet handler.217 */218+DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };219220#ifdef CONFIG_SYSFS221extern int netdev_sysfs_init(void);···1363 Receiver routines1364 =======================================================================*/13651366+int netdev_max_backlog = 1000;1367+int netdev_budget = 300;1368int weight_p = 64; /* old backlog weight */0000000013691370DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };000000000000000000000000000000000000000000000000000137113721373/**···14481449int netif_rx(struct sk_buff *skb)1450{01451 struct softnet_data *queue;1452 unsigned long flags;1453···1464 * short when CPU is congested, but is still operating.1465 */1466 local_irq_save(flags);01467 queue = &__get_cpu_var(softnet_data);14681469 __get_cpu_var(netdev_rx_stat).total++;1470 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {1471 if (queue->input_pkt_queue.qlen) {0001472enqueue:1473 dev_hold(skb->dev);1474 __skb_queue_tail(&queue->input_pkt_queue, skb);0001475 local_irq_restore(flags);1476+ return NET_RX_SUCCESS;1477 }00014781479 netif_rx_schedule(&queue->backlog_dev);1480 goto enqueue;1481 }14820000001483 __get_cpu_var(netdev_rx_stat).dropped++;1484 local_irq_restore(flags);1485···1780 smp_mb__before_clear_bit();1781 netif_poll_enable(backlog_dev);1782001783 local_irq_enable();1784 return 0;1785}···1790{1791 struct softnet_data *queue = &__get_cpu_var(softnet_data);1792 unsigned long start_time = jiffies;1793+ int 
budget = netdev_budget;017941795 local_irq_disable();1796···2055 struct netif_rx_stats *s = v;20562057 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",2058+ s->total, s->dropped, s->time_squeeze, 0,2059+ 0, 0, 0, 0, /* was fastroute */2060+ s->cpu_collision );0000002061 return 0;2062}2063···33053306 queue = &per_cpu(softnet_data, i);3307 skb_queue_head_init(&queue->input_pkt_queue);0003308 queue->completion_queue = NULL;3309 INIT_LIST_HEAD(&queue->poll_list);3310 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);···3315 queue->backlog_dev.poll = process_backlog;3316 atomic_set(&queue->backlog_dev.refcnt, 1);3317 }0000033183319 dev_boot_phase = 0;3320
···1500 skb_split_no_header(skb, skb1, len, pos);1501}15021503+/**1504+ * skb_prepare_seq_read - Prepare a sequential read of skb data1505+ * @skb: the buffer to read1506+ * @from: lower offset of data to be read1507+ * @to: upper offset of data to be read1508+ * @st: state variable1509+ *1510+ * Initializes the specified state variable. Must be called before1511+ * invoking skb_seq_read() for the first time.1512+ */1513+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,1514+ unsigned int to, struct skb_seq_state *st)1515+{1516+ st->lower_offset = from;1517+ st->upper_offset = to;1518+ st->root_skb = st->cur_skb = skb;1519+ st->frag_idx = st->stepped_offset = 0;1520+ st->frag_data = NULL;1521+}1522+1523+/**1524+ * skb_seq_read - Sequentially read skb data1525+ * @consumed: number of bytes consumed by the caller so far1526+ * @data: destination pointer for data to be returned1527+ * @st: state variable1528+ *1529+ * Reads a block of skb data at &consumed relative to the1530+ * lower offset specified to skb_prepare_seq_read(). Assigns1531+ * the head of the data block to &data and returns the length1532+ * of the block or 0 if the end of the skb data or the upper1533+ * offset has been reached.1534+ *1535+ * The caller is not required to consume all of the data1536+ * returned, i.e. 
&consumed is typically set to the number1537+ * of bytes already consumed and the next call to1538+ * skb_seq_read() will return the remaining part of the block.1539+ *1540+ * Note: The size of each block of data returned can be arbitary,1541+ * this limitation is the cost for zerocopy seqeuental1542+ * reads of potentially non linear data.1543+ *1544+ * Note: Fragment lists within fragments are not implemented1545+ * at the moment, state->root_skb could be replaced with1546+ * a stack for this purpose.1547+ */1548+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,1549+ struct skb_seq_state *st)1550+{1551+ unsigned int block_limit, abs_offset = consumed + st->lower_offset;1552+ skb_frag_t *frag;1553+1554+ if (unlikely(abs_offset >= st->upper_offset))1555+ return 0;1556+1557+next_skb:1558+ block_limit = skb_headlen(st->cur_skb);1559+1560+ if (abs_offset < block_limit) {1561+ *data = st->cur_skb->data + abs_offset;1562+ return block_limit - abs_offset;1563+ }1564+1565+ if (st->frag_idx == 0 && !st->frag_data)1566+ st->stepped_offset += skb_headlen(st->cur_skb);1567+1568+ while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {1569+ frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];1570+ block_limit = frag->size + st->stepped_offset;1571+1572+ if (abs_offset < block_limit) {1573+ if (!st->frag_data)1574+ st->frag_data = kmap_skb_frag(frag);1575+1576+ *data = (u8 *) st->frag_data + frag->page_offset +1577+ (abs_offset - st->stepped_offset);1578+1579+ return block_limit - abs_offset;1580+ }1581+1582+ if (st->frag_data) {1583+ kunmap_skb_frag(st->frag_data);1584+ st->frag_data = NULL;1585+ }1586+1587+ st->frag_idx++;1588+ st->stepped_offset += frag->size;1589+ }1590+1591+ if (st->cur_skb->next) {1592+ st->cur_skb = st->cur_skb->next;1593+ st->frag_idx = 0;1594+ goto next_skb;1595+ } else if (st->root_skb == st->cur_skb &&1596+ skb_shinfo(st->root_skb)->frag_list) {1597+ st->cur_skb = skb_shinfo(st->root_skb)->frag_list;1598+ goto next_skb;1599+ 
}1600+1601+ return 0;1602+}1603+1604+/**1605+ * skb_abort_seq_read - Abort a sequential read of skb data1606+ * @st: state variable1607+ *1608+ * Must be called if skb_seq_read() was not called until it1609+ * returned 0.1610+ */1611+void skb_abort_seq_read(struct skb_seq_state *st)1612+{1613+ if (st->frag_data)1614+ kunmap_skb_frag(st->frag_data);1615+}1616+1617+#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))1618+1619+static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,1620+ struct ts_config *conf,1621+ struct ts_state *state)1622+{1623+ return skb_seq_read(offset, text, TS_SKB_CB(state));1624+}1625+1626+static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)1627+{1628+ skb_abort_seq_read(TS_SKB_CB(state));1629+}1630+1631+/**1632+ * skb_find_text - Find a text pattern in skb data1633+ * @skb: the buffer to look in1634+ * @from: search offset1635+ * @to: search limit1636+ * @config: textsearch configuration1637+ * @state: uninitialized textsearch state variable1638+ *1639+ * Finds a pattern in the skb data according to the specified1640+ * textsearch configuration. Use textsearch_next() to retrieve1641+ * subsequent occurrences of the pattern. 
Returns the offset1642+ * to the first occurrence or UINT_MAX if no match was found.1643+ */1644+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,1645+ unsigned int to, struct ts_config *config,1646+ struct ts_state *state)1647+{1648+ config->get_next_block = skb_ts_get_next_block;1649+ config->finish = skb_ts_finish;1650+1651+ skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));1652+1653+ return textsearch_find(config, state);1654+}1655+1656void __init skb_init(void)1657{1658 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",···1538EXPORT_SYMBOL(skb_unlink);1539EXPORT_SYMBOL(skb_append);1540EXPORT_SYMBOL(skb_split);1541+EXPORT_SYMBOL(skb_prepare_seq_read);1542+EXPORT_SYMBOL(skb_seq_read);1543+EXPORT_SYMBOL(skb_abort_seq_read);1544+EXPORT_SYMBOL(skb_find_text);
···449 To compile this code as a module, choose M here: the450 module will be called em_meta.451000000000000452config NET_CLS_ACT453 bool "Packet ACTION"454 depends on EXPERIMENTAL && NET_CLS && NET_QOS
···449 To compile this code as a module, choose M here: the450 module will be called em_meta.451452+config NET_EMATCH_TEXT453+ tristate "Textsearch"454+ depends on NET_EMATCH455+ select TEXTSEARCH456+ ---help---457+ Say Y here if you want to be able to classify packets based on458+ textsearch comparisons. Please select the appropriate textsearch459+ algorithms in the Library section.460+461+ To compile this code as a module, choose M here: the462+ module will be called em_text.463+464config NET_CLS_ACT465 bool "Packet ACTION"466 depends on EXPERIMENTAL && NET_CLS && NET_QOS