Merge branch 'linus' into x86/urgent

+4496 -4680
+6
Documentation/filesystems/ext4.txt
··· 26 26 27 27 git://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git 28 28 29 + - Note that it is highly important to install the mke2fs.conf file 30 + that comes with the e2fsprogs 1.41.x sources in /etc/mke2fs.conf. If 31 + you have edited the /etc/mke2fs.conf file installed on your system, 32 + you will need to merge your changes with the version from e2fsprogs 33 + 1.41.x. 34 + 29 35 - Create a new filesystem using the ext4dev filesystem type: 30 36 31 37 # mke2fs -t ext4dev /dev/hda1
+47 -20
Documentation/ja_JP/HOWTO
··· 11 11 fork. So if you have any comments or updates for this file, please try 12 12 to update the original English file first. 13 13 14 - Last Updated: 2007/11/16 14 + Last Updated: 2008/08/21 15 15 ================================== 16 16 これは、 17 - linux-2.6.24/Documentation/HOWTO 17 + linux-2.6.27/Documentation/HOWTO 18 18 の和訳です。 19 19 20 20 翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ > 21 - 翻訳日: 2007/11/10 21 + 翻訳日: 2008/8/5 22 22 翻訳者: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com> 23 23 校正者: 松倉さん <nbh--mats at nifty dot com> 24 24 小林 雅典さん (Masanori Kobayasi) <zap03216 at nifty dot ne dot jp> ··· 287 287 に安定した状態にあると判断したときにリリースされます。目標は毎週新 288 288 しい -rc カーネルをリリースすることです。 289 289 290 - - 以下の URL で各 -rc リリースに存在する既知の後戻り問題のリスト 291 - が追跡されます- 292 - http://kernelnewbies.org/known_regressions 293 - 294 290 - このプロセスはカーネルが 「準備ができた」と考えられるまで継続しま 295 291 す。このプロセスはだいたい 6週間継続します。 292 + 293 + - 各リリースでの既知の後戻り問題(regression: このリリースの中で新規 294 + に作り込まれた問題を指す) はその都度 Linux-kernel メーリングリスト 295 + に投稿されます。ゴールとしては、カーネルが 「準備ができた」と宣言 296 + する前にこのリストの長さをゼロに減らすことですが、現実には、数個の 297 + 後戻り問題がリリース時にたびたび残ってしまいます。 296 298 297 299 Andrew Morton が Linux-kernel メーリングリストにカーネルリリースについ 298 300 て書いたことをここで言っておくことは価値があります- ··· 305 303 2.6.x.y -stable カーネルツリー 306 304 --------------------------- 307 305 308 - バージョンに4つ目の数字がついたカーネルは -stable カーネルです。これに 309 - は、2.6.x カーネルで見つかったセキュリティ問題や重大な後戻りに対する比 310 - 較的小さい重要な修正が含まれます。 306 + バージョン番号が4つの数字に分かれているカーネルは -stable カーネルです。 307 + これには、2.6.x カーネルで見つかったセキュリティ問題や重大な後戻りに対 308 + する比較的小さい重要な修正が含まれます。 311 309 312 310 これは、開発/実験的バージョンのテストに協力することに興味が無く、 313 311 最新の安定したカーネルを使いたいユーザに推奨するブランチです。 314 312 315 - もし、2.6.x.y カーネルが存在しない場合には、番号が一番大きい 2.6.x 316 - が最新の安定版カーネルです。 313 + もし、2.6.x.y カーネルが存在しない場合には、番号が一番大きい 2.6.x が 314 + 最新の安定版カーネルです。 317 315 318 - 2.6.x.y は "stable" チーム <stable@kernel.org> でメンテされており、だ 319 - いたい隔週でリリースされています。 316 + 2.6.x.y は "stable" チーム <stable@kernel.org> でメンテされており、必 317 + 要に応じてリリースされます。通常のリリース期間は 2週間毎ですが、差し迫っ 318 + た問題がなければもう少し長くなることもあります。セキュリティ関連の問題 319 + の場合はこれに対してだいたいの場合、すぐにリリースがされます。 320 320 321 321 カーネルツリーに入っている、Documentation/stable_kernel_rules.txt ファ 322 322 イルにはどのような種類の変更が -stable ツリーに受け入れ可能か、またリ ··· 345 341 メインラインへ入れるように Linus にプッシュします。 346 342 347 343 メインカーネルツリーに含めるために Linus に送る前に、すべての新しいパッ 348 - チが -mm ツリーでテストされることが強く推奨されます。 344 + チが -mm ツリーでテストされることが強く推奨されています。マージウィン 345 + ドウが開く前に -mm ツリーに現れなかったパッチはメインラインにマージさ 346 + れることは困難になります。 349 347 350 348 これらのカーネルは安定して動作すべきシステムとして使うのには適切ではあ 351 349 りませんし、カーネルブランチの中でももっとも動作にリスクが高いものです。 ··· 401 395 - pcmcia, Dominik Brodowski <linux@dominikbrodowski.net> 402 396 git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git 403 397 404 - - SCSI, James Bottomley <James.Bottomley@SteelEye.com> 398 + - SCSI, James Bottomley <James.Bottomley@hansenpartnership.com> 405 399 git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git 406 400 401 + - x86, Ingo Molnar <mingo@elte.hu> 402 + git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git 403 + 407 404 quilt ツリー- 408 - - USB, PCI ドライバコアと I2C, Greg Kroah-Hartman <gregkh@suse.de> 405 + - USB, ドライバコアと I2C, Greg Kroah-Hartman <gregkh@suse.de> 409 406 kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ 410 - - x86-64 と i386 の仲間 Andi Kleen <ak@suse.de> 411 407 412 408 その他のカーネルツリーは http://git.kernel.org/ と MAINTAINERS ファ 413 409 イルに一覧表があります。 ··· 420 412 bugzilla.kernel.org は Linux カーネル開発者がカーネルのバグを追跡する 421 413 場所です。ユーザは見つけたバグの全てをこのツールで報告すべきです。 422 414 どう kernel bugzilla を使うかの詳細は、以下を参照してください- 423 - http://test.kernel.org/bugzilla/faq.html 424 - 415 + 
http://bugzilla.kernel.org/page.cgi?id=faq.html 425 416 メインカーネルソースディレクトリにあるファイル REPORTING-BUGS はカーネ 426 417 ルバグらしいものについてどうレポートするかの良いテンプレートであり、問 427 418 題の追跡を助けるためにカーネル開発者にとってどんな情報が必要なのかの詳 428 419 細が書かれています。 420 + 421 + バグレポートの管理 422 + ------------------- 423 + 424 + あなたのハッキングのスキルを訓練する最高の方法のひとつに、他人がレポー 425 + トしたバグを修正することがあります。あなたがカーネルをより安定化させる 426 + こに寄与するということだけでなく、あなたは 現実の問題を修正することを 427 + 学び、自分のスキルも強化でき、また他の開発者があなたの存在に気がつき 428 + ます。バグを修正することは、多くの開発者の中から自分が功績をあげる最善 429 + の道です、なぜなら多くの人は他人のバグの修正に時間を浪費することを好ま 430 + ないからです。 431 + 432 + すでにレポートされたバグのために仕事をするためには、 433 + http://bugzilla.kernel.org に行ってください。もし今後のバグレポートに 434 + ついてアドバイスを受けたいのであれば、bugme-new メーリングリスト(新し 435 + いバグレポートだけがここにメールされる) または bugme-janitor メーリン 436 + グリスト(bugzilla の変更毎にここにメールされる)を購読できます。 437 + 438 + http://lists.linux-foundation.org/mailman/listinfo/bugme-new 439 + http://lists.linux-foundation.org/mailman/listinfo/bugme-janitors 429 440 430 441 メーリングリスト 431 442 -------------
+111
Documentation/ja_JP/SubmitChecklist
··· 1 + NOTE: 2 + This is a version of Documentation/SubmitChecklist into Japanese. 3 + This document is maintained by Takenori Nagano <t-nagano@ah.jp.nec.com> 4 + and the JF Project team <http://www.linux.or.jp/JF/>. 5 + If you find any difference between this document and the original file 6 + or a problem with the translation, 7 + please contact the maintainer of this file or JF project. 8 + 9 + Please also note that the purpose of this file is to be easier to read 10 + for non English (read: Japanese) speakers and is not intended as a 11 + fork. So if you have any comments or updates of this file, please try 12 + to update the original English file first. 13 + 14 + Last Updated: 2008/07/14 15 + ================================== 16 + これは、 17 + linux-2.6.26/Documentation/SubmitChecklist の和訳です。 18 + 19 + 翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ > 20 + 翻訳日: 2008/07/14 21 + 翻訳者: Takenori Nagano <t-nagano at ah dot jp dot nec dot com> 22 + 校正者: Masanori Kobayashi さん <zap03216 at nifty dot ne dot jp> 23 + ================================== 24 + 25 + 26 + Linux カーネルパッチ投稿者向けチェックリスト 27 + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 28 + 29 + 本書では、パッチをより素早く取り込んでもらいたい開発者が実践すべき基本的な事柄 30 + をいくつか紹介します。ここにある全ての事柄は、Documentation/SubmittingPatches 31 + などのLinuxカーネルパッチ投稿に際しての心得を補足するものです。 32 + 33 + 1: 妥当なCONFIGオプションや変更されたCONFIGオプション、つまり =y, =m, =n 34 + 全てで正しくビルドできることを確認してください。その際、gcc及びリンカが 35 + warningやerrorを出していないことも確認してください。 36 + 37 + 2: allnoconfig, allmodconfig オプションを用いて正しくビルドできることを 38 + 確認してください。 39 + 40 + 3: 手許のクロスコンパイルツールやOSDLのPLMのようなものを用いて、複数の 41 + アーキテクチャにおいても正しくビルドできることを確認してください。 42 + 43 + 4: 64bit長の'unsigned long'を使用しているppc64は、クロスコンパイルでの 44 + チェックに適当なアーキテクチャです。 45 + 46 + 5: カーネルコーディングスタイルに準拠しているかどうか確認してください(!) 47 + 48 + 6: CONFIGオプションの追加・変更をした場合には、CONFIGメニューが壊れていない 49 + ことを確認してください。 50 + 51 + 7: 新しくKconfigのオプションを追加する際には、必ずそのhelpも記述してください。 52 + 53 + 8: 適切なKconfigの依存関係を考えながら慎重にチェックしてください。 54 + ただし、この作業はマシンを使ったテストできちんと行うのがとても困難です。 55 + うまくやるには、自分の頭で考えることです。 56 + 57 + 9: sparseを利用してちゃんとしたコードチェックをしてください。 58 + 59 + 10: 'make checkstack' と 'make namespacecheck' を利用し、問題が発見されたら 60 + 修正してください。'make checkstack' は明示的に問題を示しませんが、どれか 61 + 1つの関数が512バイトより大きいスタックを使っていれば、修正すべき候補と 62 + なります。 63 + 64 + 11: グローバルなkernel API を説明する kernel-doc をソースの中に含めてください。 65 + ( staticな関数においては必須ではありませんが、含めてもらっても結構です ) 66 + そして、'make htmldocs' もしくは 'make mandocs' を利用して追記した 67 + ドキュメントのチェックを行い、問題が見つかった場合には修正を行ってください。 68 + 69 + 12: CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT, CONFIG_DEBUG_SLAB, 70 + CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES, CONFIG_DEBUG_SPINLOCK, 71 + CONFIG_DEBUG_SPINLOCK_SLEEP これら全てを同時に有効にして動作確認を 72 + 行ってください。 73 + 74 + 13: CONFIG_SMP, CONFIG_PREEMPT を有効にした場合と無効にした場合の両方で 75 + ビルドした上、動作確認を行ってください。 76 + 77 + 14: もしパッチがディスクのI/O性能などに影響を与えるようであれば、 78 + 'CONFIG_LBD'オプションを有効にした場合と無効にした場合の両方で 79 + テストを実施してみてください。 80 + 81 + 15: lockdepの機能を全て有効にした上で、全てのコードパスを評価してください。 82 + 83 + 16: /proc に新しいエントリを追加した場合には、Documentation/ 配下に 84 + 必ずドキュメントを追加してください。 85 + 86 + 17: 新しいブートパラメータを追加した場合には、 87 + 必ずDocumentation/kernel-parameters.txt に説明を追加してください。 88 + 89 + 18: 新しくmoduleにパラメータを追加した場合には、MODULE_PARM_DESC()を 90 + 利用して必ずその説明を記述してください。 91 + 92 + 19: 新しいuserspaceインタフェースを作成した場合には、Documentation/ABI/ に 93 + Documentation/ABI/README を参考にして必ずドキュメントを追加してください。 94 + 95 + 20: 'make headers_check'を実行して全く問題がないことを確認してください。 96 + 97 + 21: 少なくともslabアロケーションとpageアロケーションに失敗した場合の 98 + 挙動について、fault-injectionを利用して確認してください。 99 + Documentation/fault-injection/ を参照してください。 100 + 101 + 追加したコードがかなりの量であったならば、サブシステム特有の 102 + fault-injectionを追加したほうが良いかもしれません。 103 
+ 104 + 22: 新たに追加したコードは、`gcc -W'でコンパイルしてください。 105 + このオプションは大量の不要なメッセージを出力しますが、 106 + "warning: comparison between signed and unsigned" のようなメッセージは、 107 + バグを見つけるのに役に立ちます。 108 + 109 + 23: 投稿したパッチが -mm パッチセットにマージされた後、全ての既存のパッチや 110 + VM, VFS およびその他のサブシステムに関する様々な変更と、現時点でも共存 111 + できることを確認するテストを行ってください。
+5
Documentation/kernel-parameters.txt
··· 365 365 no delay (0). 366 366 Format: integer 367 367 368 + bootmem_debug [KNL] Enable bootmem allocator debug messages. 369 + 368 370 bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards) 369 371 bttv.radio= Most important insmod options are available as 370 372 kernel args too. ··· 1073 1071 allowed. 1074 1072 1075 1073 * [no]ncq: Turn on or off NCQ. 1074 + 1075 + * nohrst, nosrst, norst: suppress hard, soft 1076 + and both resets. 1076 1077 1077 1078 If there are multiple matching configurations changing 1078 1079 the same attribute, the last one is used.
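Purely as an illustrative sketch (not part of the patch above), the two new options could be combined on a kernel command line, assuming the usual libata.force=[ID:]VAL syntax; with no port ID given, norst would apply to all ports:

    bootmem_debug libata.force=norst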
+5
Documentation/rfkill.txt
··· 363 363 when possible) the overall transmitter rfkill state, not of a particular rfkill 364 364 line. 365 365 366 + 5. During suspend, the rfkill class will attempt to soft-block the radio 367 + through a call to rfkill->toggle_radio, and will try to restore its previous 368 + state during resume. After a rfkill class is suspended, it will *not* call 369 + rfkill->toggle_radio until it is resumed. 370 + 366 371 Example of a WLAN wireless driver connected to the rfkill subsystem: 367 372 -------------------------------------------------------------------- 368 373
+23
Documentation/scsi/ChangeLog.megaraid_sas
··· 1 + 2 + 1 Release Date : Thur.July. 24 11:41:51 PST 2008 - 3 + (emaild-id:megaraidlinux@lsi.com) 4 + Sumant Patro 5 + Bo Yang 6 + 7 + 2 Current Version : 00.00.04.01 8 + 3 Older Version : 00.00.03.22 9 + 10 + 1. Add the new controller (0078, 0079) support to the driver 11 + Those controllers are LSI's next generatation(gen2) SAS controllers. 12 + 13 + 1 Release Date : Mon.June. 23 10:12:45 PST 2008 - 14 + (emaild-id:megaraidlinux@lsi.com) 15 + Sumant Patro 16 + Bo Yang 17 + 18 + 2 Current Version : 00.00.03.22 19 + 3 Older Version : 00.00.03.20 20 + 21 + 1. Add shutdown DCMD cmd to the shutdown routine to make FW shutdown proper. 22 + 2. Unexpected interrupt occurs in HWR Linux driver, add the dumy readl pci flush will fix this issue. 23 + 1 24 1 Release Date : Mon. March 10 11:02:31 PDT 2008 - 2 25 (emaild-id:megaraidlinux@lsi.com) 3 26 Sumant Patro
+8 -83
MAINTAINERS
··· 942 942 L: linux-mtd@lists.infradead.org 943 943 S: Maintained 944 944 945 + BLUETOOTH DRIVERS 946 + P: Marcel Holtmann 947 + M: marcel@holtmann.org 948 + L: linux-bluetooth@vger.kernel.org 949 + W: http://www.bluez.org/ 950 + S: Maintained 951 + 945 952 BLUETOOTH SUBSYSTEM 946 953 P: Marcel Holtmann 947 954 M: marcel@holtmann.org 948 - P: Maxim Krasnyansky 949 - M: maxk@qualcomm.com 950 955 L: linux-bluetooth@vger.kernel.org 951 - W: http://bluez.sf.net 952 - W: http://www.bluez.org 953 - W: http://www.holtmann.org/linux/bluetooth/ 956 + W: http://www.bluez.org/ 954 957 T: git kernel.org:/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git 955 - S: Maintained 956 - 957 - BLUETOOTH RFCOMM LAYER 958 - P: Marcel Holtmann 959 - M: marcel@holtmann.org 960 - P: Maxim Krasnyansky 961 - M: maxk@qualcomm.com 962 - S: Maintained 963 - 964 - BLUETOOTH BNEP LAYER 965 - P: Marcel Holtmann 966 - M: marcel@holtmann.org 967 - P: Maxim Krasnyansky 968 - M: maxk@qualcomm.com 969 - S: Maintained 970 - 971 - BLUETOOTH CMTP LAYER 972 - P: Marcel Holtmann 973 - M: marcel@holtmann.org 974 - S: Maintained 975 - 976 - BLUETOOTH HIDP LAYER 977 - P: Marcel Holtmann 978 - M: marcel@holtmann.org 979 - S: Maintained 980 - 981 - BLUETOOTH HCI UART DRIVER 982 - P: Marcel Holtmann 983 - M: marcel@holtmann.org 984 - P: Maxim Krasnyansky 985 - M: maxk@qualcomm.com 986 - S: Maintained 987 - 988 - BLUETOOTH HCI USB DRIVER 989 - P: Marcel Holtmann 990 - M: marcel@holtmann.org 991 - P: Maxim Krasnyansky 992 - M: maxk@qualcomm.com 993 - S: Maintained 994 - 995 - BLUETOOTH HCI BCM203X DRIVER 996 - P: Marcel Holtmann 997 - M: marcel@holtmann.org 998 - S: Maintained 999 - 1000 - BLUETOOTH HCI BPA10X DRIVER 1001 - P: Marcel Holtmann 1002 - M: marcel@holtmann.org 1003 - S: Maintained 1004 - 1005 - BLUETOOTH HCI BFUSB DRIVER 1006 - P: Marcel Holtmann 1007 - M: marcel@holtmann.org 1008 - S: Maintained 1009 - 1010 - BLUETOOTH HCI DTL1 DRIVER 1011 - P: Marcel Holtmann 1012 - M: marcel@holtmann.org 1013 - S: Maintained 1014 - 1015 - BLUETOOTH HCI BLUECARD DRIVER 1016 - P: Marcel Holtmann 1017 - M: marcel@holtmann.org 1018 - S: Maintained 1019 - 1020 - BLUETOOTH HCI BT3C DRIVER 1021 - P: Marcel Holtmann 1022 - M: marcel@holtmann.org 1023 - S: Maintained 1024 - 1025 - BLUETOOTH HCI BTUART DRIVER 1026 - P: Marcel Holtmann 1027 - M: marcel@holtmann.org 1028 - S: Maintained 1029 - 1030 - BLUETOOTH HCI VHCI DRIVER 1031 - P: Maxim Krasnyansky 1032 - M: maxk@qualcomm.com 1033 958 S: Maintained 1034 959 1035 960 BONDING DRIVER
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 27 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = Rotary Wombat 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/arm/mach-integrator/impd1.c
··· 405 405 406 406 ret = amba_device_register(d, &dev->resource); 407 407 if (ret) { 408 - dev_err(&d->dev, "unable to register device: %d\n"); 408 + dev_err(&d->dev, "unable to register device: %d\n", ret); 409 409 kfree(d); 410 410 } 411 411 }
+2 -2
arch/arm/mach-ixp4xx/fsg-setup.c
··· 64 64 65 65 static struct i2c_board_info __initdata fsg_i2c_board_info [] = { 66 66 { 67 - I2C_BOARD_INFO("rtc-isl1208", 0x6f), 67 + I2C_BOARD_INFO("isl1208", 0x6f), 68 68 }, 69 69 }; 70 70 ··· 179 179 { 180 180 DECLARE_MAC_BUF(mac_buf); 181 181 uint8_t __iomem *f; 182 - int i; 183 182 184 183 ixp4xx_sys_init(); 185 184 ··· 227 228 f = ioremap(IXP4XX_EXP_BUS_BASE(0), 0x400000); 228 229 if (f) { 229 230 #ifdef __ARMEB__ 231 + int i; 230 232 for (i = 0; i < 6; i++) { 231 233 fsg_plat_eth[0].hwaddr[i] = readb(f + 0x3C0422 + i); 232 234 fsg_plat_eth[1].hwaddr[i] = readb(f + 0x3C043B + i);
+25
arch/arm/mach-pxa/clock.c
··· 125 125 list_add(&clks[i].node, &clocks); 126 126 mutex_unlock(&clocks_mutex); 127 127 } 128 + 129 + int clk_add_alias(char *alias, struct device *alias_dev, char *id, 130 + struct device *dev) 131 + { 132 + struct clk *r = clk_lookup(dev, id); 133 + struct clk *new; 134 + 135 + if (!r) 136 + return -ENODEV; 137 + 138 + new = kzalloc(sizeof(struct clk), GFP_KERNEL); 139 + 140 + if (!new) 141 + return -ENOMEM; 142 + 143 + new->name = alias; 144 + new->dev = alias_dev; 145 + new->other = r; 146 + 147 + mutex_lock(&clocks_mutex); 148 + list_add(&new->node, &clocks); 149 + mutex_unlock(&clocks_mutex); 150 + 151 + return 0; 152 + }
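The clk_add_alias() helper added above lets a board file publish an existing clock under a second name at runtime. A minimal sketch of the calling pattern, reusing the SA1111/GPIO11 names from the lubbock.c hunk further down; the wrapper function name and the warning message are illustrative, not taken from the patch:

    #include <linux/init.h>        /* __init */
    #include <linux/kernel.h>      /* printk */
    #include "clock.h"             /* clk_add_alias() prototype (hunk below) */

    static void __init board_add_sa1111_clk_alias(void)
    {
    	/* clk_add_alias() looks up "GPIO11_CLK", allocates a new
    	 * struct clk whose ->other points at it, and links it into
    	 * the global clock list under the name "SA1111_CLK". */
    	if (clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL))
    		printk(KERN_WARNING "SA1111_CLK alias not registered\n");
    }

The in-tree caller (lubbock.c, below) invokes clk_add_alias() the same way from its init_machine hook, without checking the return value.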
+5
arch/arm/mach-pxa/clock.h
··· 1 + #include <linux/list.h> 2 + 1 3 struct clk; 2 4 3 5 struct clkops { ··· 88 86 #endif 89 87 90 88 void clks_register(struct clk *clks, size_t num); 89 + int clk_add_alias(char *alias, struct device *alias_dev, char *id, 90 + struct device *dev); 91 +
+121 -49
arch/arm/mach-pxa/eseries.c
··· 10 10 * 11 11 */ 12 12 13 + #include <linux/kernel.h> 13 14 #include <linux/init.h> 14 15 15 16 #include <asm/setup.h> 16 17 #include <asm/mach/arch.h> 17 - #include <mach/hardware.h> 18 18 #include <asm/mach-types.h> 19 + 20 + #include <mach/mfp-pxa25x.h> 21 + #include <mach/hardware.h> 19 22 20 23 #include "generic.h" 21 24 25 + static unsigned long e740_pin_config[] __initdata = { 26 + /* Chip selects */ 27 + GPIO15_nCS_1, /* CS1 - Flash */ 28 + GPIO79_nCS_3, /* CS3 - IMAGEON */ 29 + GPIO80_nCS_4, /* CS4 - TMIO */ 30 + 31 + /* Clocks */ 32 + GPIO12_32KHz, 33 + 34 + /* BTUART */ 35 + GPIO42_BTUART_RXD, 36 + GPIO43_BTUART_TXD, 37 + GPIO44_BTUART_CTS, 38 + GPIO45_GPIO, /* Used by TMIO for #SUSPEND */ 39 + 40 + /* PC Card */ 41 + GPIO8_GPIO, /* CD0 */ 42 + GPIO44_GPIO, /* CD1 */ 43 + GPIO11_GPIO, /* IRQ0 */ 44 + GPIO6_GPIO, /* IRQ1 */ 45 + GPIO27_GPIO, /* RST0 */ 46 + GPIO24_GPIO, /* RST1 */ 47 + GPIO20_GPIO, /* PWR0 */ 48 + GPIO23_GPIO, /* PWR1 */ 49 + GPIO48_nPOE, 50 + GPIO49_nPWE, 51 + GPIO50_nPIOR, 52 + GPIO51_nPIOW, 53 + GPIO52_nPCE_1, 54 + GPIO53_nPCE_2, 55 + GPIO54_nPSKTSEL, 56 + GPIO55_nPREG, 57 + GPIO56_nPWAIT, 58 + GPIO57_nIOIS16, 59 + 60 + /* wakeup */ 61 + GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, 62 + }; 63 + 64 + static unsigned long e400_pin_config[] __initdata = { 65 + /* Chip selects */ 66 + GPIO15_nCS_1, /* CS1 - Flash */ 67 + GPIO80_nCS_4, /* CS4 - TMIO */ 68 + 69 + /* Clocks */ 70 + GPIO12_32KHz, 71 + 72 + /* BTUART */ 73 + GPIO42_BTUART_RXD, 74 + GPIO43_BTUART_TXD, 75 + GPIO44_BTUART_CTS, 76 + GPIO45_GPIO, /* Used by TMIO for #SUSPEND */ 77 + 78 + /* wakeup */ 79 + GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, 80 + }; 81 + 22 82 /* Only e800 has 128MB RAM */ 23 83 static void __init eseries_fixup(struct machine_desc *desc, 24 - struct tag *tags, char **cmdline, struct meminfo *mi) 84 + struct tag *tags, char **cmdline, struct meminfo *mi) 25 85 { 26 86 mi->nr_banks=1; 27 87 mi->bank[0].start = 0xa0000000; ··· 92 32 mi->bank[0].size = (64*1024*1024); 93 33 } 94 34 35 + static void __init e740_init(void) 36 + { 37 + pxa2xx_mfp_config(ARRAY_AND_SIZE(e740_pin_config)); 38 + } 39 + 40 + static void __init e400_init(void) 41 + { 42 + pxa2xx_mfp_config(ARRAY_AND_SIZE(e400_pin_config)); 43 + } 44 + 95 45 /* e-series machine definitions */ 96 46 97 47 #ifdef CONFIG_MACH_E330 98 48 MACHINE_START(E330, "Toshiba e330") 99 - /* Maintainer: Ian Molton (spyro@f2s.com) */ 100 - .phys_io = 0x40000000, 101 - .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 102 - .boot_params = 0xa0000100, 103 - .map_io = pxa_map_io, 104 - .init_irq = pxa25x_init_irq, 105 - .fixup = eseries_fixup, 106 - .timer = &pxa_timer, 49 + /* Maintainer: Ian Molton (spyro@f2s.com) */ 50 + .phys_io = 0x40000000, 51 + .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 52 + .boot_params = 0xa0000100, 53 + .map_io = pxa_map_io, 54 + .init_irq = pxa25x_init_irq, 55 + .fixup = eseries_fixup, 56 + .timer = &pxa_timer, 107 57 MACHINE_END 108 58 #endif 109 59 110 60 #ifdef CONFIG_MACH_E350 111 61 MACHINE_START(E350, "Toshiba e350") 112 62 /* Maintainer: Ian Molton (spyro@f2s.com) */ 113 - .phys_io = 0x40000000, 114 - .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 115 - .boot_params = 0xa0000100, 116 - .map_io = pxa_map_io, 117 - .init_irq = pxa25x_init_irq, 118 - .fixup = eseries_fixup, 119 - .timer = &pxa_timer, 63 + .phys_io = 0x40000000, 64 + .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 65 + .boot_params = 0xa0000100, 66 + .map_io = pxa_map_io, 67 + .init_irq = pxa25x_init_irq, 68 + .fixup = eseries_fixup, 69 + .timer = 
&pxa_timer, 120 70 MACHINE_END 121 71 #endif 122 72 123 73 #ifdef CONFIG_MACH_E740 124 74 MACHINE_START(E740, "Toshiba e740") 125 - /* Maintainer: Ian Molton (spyro@f2s.com) */ 126 - .phys_io = 0x40000000, 127 - .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 128 - .boot_params = 0xa0000100, 129 - .map_io = pxa_map_io, 130 - .init_irq = pxa25x_init_irq, 131 - .fixup = eseries_fixup, 132 - .timer = &pxa_timer, 75 + /* Maintainer: Ian Molton (spyro@f2s.com) */ 76 + .phys_io = 0x40000000, 77 + .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 78 + .boot_params = 0xa0000100, 79 + .map_io = pxa_map_io, 80 + .init_irq = pxa25x_init_irq, 81 + .fixup = eseries_fixup, 82 + .init_machine = e740_init, 83 + .timer = &pxa_timer, 133 84 MACHINE_END 134 85 #endif 135 86 136 87 #ifdef CONFIG_MACH_E750 137 88 MACHINE_START(E750, "Toshiba e750") 138 - /* Maintainer: Ian Molton (spyro@f2s.com) */ 139 - .phys_io = 0x40000000, 140 - .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 141 - .boot_params = 0xa0000100, 142 - .map_io = pxa_map_io, 143 - .init_irq = pxa25x_init_irq, 144 - .fixup = eseries_fixup, 145 - .timer = &pxa_timer, 89 + /* Maintainer: Ian Molton (spyro@f2s.com) */ 90 + .phys_io = 0x40000000, 91 + .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 92 + .boot_params = 0xa0000100, 93 + .map_io = pxa_map_io, 94 + .init_irq = pxa25x_init_irq, 95 + .fixup = eseries_fixup, 96 + .timer = &pxa_timer, 146 97 MACHINE_END 147 98 #endif 148 99 149 100 #ifdef CONFIG_MACH_E400 150 101 MACHINE_START(E400, "Toshiba e400") 151 - /* Maintainer: Ian Molton (spyro@f2s.com) */ 152 - .phys_io = 0x40000000, 153 - .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 154 - .boot_params = 0xa0000100, 155 - .map_io = pxa_map_io, 156 - .init_irq = pxa25x_init_irq, 157 - .fixup = eseries_fixup, 158 - .timer = &pxa_timer, 102 + /* Maintainer: Ian Molton (spyro@f2s.com) */ 103 + .phys_io = 0x40000000, 104 + .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 105 + .boot_params = 0xa0000100, 106 + .map_io = pxa_map_io, 107 + .init_irq = pxa25x_init_irq, 108 + .fixup = eseries_fixup, 109 + .init_machine = e400_init, 110 + .timer = &pxa_timer, 159 111 MACHINE_END 160 112 #endif 161 113 162 114 #ifdef CONFIG_MACH_E800 163 115 MACHINE_START(E800, "Toshiba e800") 164 - /* Maintainer: Ian Molton (spyro@f2s.com) */ 165 - .phys_io = 0x40000000, 166 - .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 167 - .boot_params = 0xa0000100, 168 - .map_io = pxa_map_io, 169 - .init_irq = pxa25x_init_irq, 170 - .fixup = eseries_fixup, 171 - .timer = &pxa_timer, 116 + /* Maintainer: Ian Molton (spyro@f2s.com) */ 117 + .phys_io = 0x40000000, 118 + .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 119 + .boot_params = 0xa0000100, 120 + .map_io = pxa_map_io, 121 + .init_irq = pxa25x_init_irq, 122 + .fixup = eseries_fixup, 123 + .timer = &pxa_timer, 172 124 MACHINE_END 173 125 #endif 174 126
+1
arch/arm/mach-pxa/include/mach/irqs.h
··· 183 183 defined(CONFIG_MACH_TOSA) || \ 184 184 defined(CONFIG_MACH_MAINSTONE) || \ 185 185 defined(CONFIG_MACH_PCM027) || \ 186 + defined(CONFIG_ARCH_PXA_ESERIES) || \ 186 187 defined(CONFIG_MACH_MAGICIAN) 187 188 #define NR_IRQS (IRQ_BOARD_END) 188 189 #elif defined(CONFIG_MACH_ZYLONITE)
+2
arch/arm/mach-pxa/lubbock.c
··· 52 52 #include <mach/mmc.h> 53 53 54 54 #include "generic.h" 55 + #include "clock.h" 55 56 #include "devices.h" 56 57 57 58 static unsigned long lubbock_pin_config[] __initdata = { ··· 486 485 487 486 pxa2xx_mfp_config(ARRAY_AND_SIZE(lubbock_pin_config)); 488 487 488 + clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL); 489 489 pxa_set_udc_info(&udc_info); 490 490 set_pxa_fb_info(&sharp_lm8v31); 491 491 pxa_set_mci_info(&lubbock_mci_platform_data);
+1 -9
arch/arm/mach-pxa/pxa25x.c
··· 166 166 ; 167 167 168 168 /* 169 - * PXA 2xx clock declarations. Order is important (see aliases below) 170 - * Please be careful not to disrupt the ordering. 169 + * PXA 2xx clock declarations. 171 170 */ 172 171 static struct clk pxa25x_clks[] = { 173 172 INIT_CK("LCDCLK", LCD, &clk_pxa25x_lcd_ops, &pxa_device_fb.dev), ··· 191 192 INIT_CKEN("I2SCLK", I2S, 14745600, 0, NULL), 192 193 */ 193 194 INIT_CKEN("FICPCLK", FICP, 47923000, 0, NULL), 194 - }; 195 - 196 - static struct clk pxa2xx_clk_aliases[] = { 197 - INIT_CKOTHER("GPIO7_CLK", &pxa25x_clks[4], NULL), 198 - INIT_CKOTHER("SA1111_CLK", &pxa25x_clks[5], NULL), 199 195 }; 200 196 201 197 #ifdef CONFIG_PM ··· 368 374 /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ 369 375 if (cpu_is_pxa255()) 370 376 ret = platform_device_register(&pxa_device_hwuart); 371 - 372 - clks_register(pxa2xx_clk_aliases, ARRAY_SIZE(pxa2xx_clk_aliases)); 373 377 374 378 return ret; 375 379 }
+2
arch/arm/mach-pxa/pxa300.c
··· 90 90 }; 91 91 92 92 static struct clk pxa310_clks[] = { 93 + #ifdef CONFIG_CPU_PXA310 93 94 PXA3xx_CKEN("MMCCLK", MMC3, 19500000, 0, &pxa3xx_device_mci3.dev), 95 + #endif 94 96 }; 95 97 96 98 static int __init pxa300_init(void)
-1
arch/arm/plat-omap/clock.c
··· 10 10 * it under the terms of the GNU General Public License version 2 as 11 11 * published by the Free Software Foundation. 12 12 */ 13 - #include <linux/version.h> 14 13 #include <linux/kernel.h> 15 14 #include <linux/init.h> 16 15 #include <linux/module.h>
-2
arch/cris/arch-v32/kernel/fasttimer.c
··· 19 19 #include <asm/irq.h> 20 20 #include <asm/system.h> 21 21 22 - #include <linux/version.h> 23 - 24 22 #include <hwregs/reg_map.h> 25 23 #include <hwregs/reg_rdwr.h> 26 24 #include <hwregs/timer_defs.h>
-1285
arch/ia64/configs/sn2_defconfig
··· 1 - # 2 - # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.23 4 - # Thu Oct 18 16:03:40 2007 5 - # 6 - CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 7 - 8 - # 9 - # General setup 10 - # 11 - CONFIG_EXPERIMENTAL=y 12 - CONFIG_LOCK_KERNEL=y 13 - CONFIG_INIT_ENV_ARG_LIMIT=32 14 - CONFIG_LOCALVERSION="" 15 - # CONFIG_LOCALVERSION_AUTO is not set 16 - CONFIG_SWAP=y 17 - CONFIG_SYSVIPC=y 18 - CONFIG_SYSVIPC_SYSCTL=y 19 - CONFIG_POSIX_MQUEUE=y 20 - # CONFIG_BSD_PROCESS_ACCT is not set 21 - CONFIG_TASKSTATS=y 22 - # CONFIG_TASK_DELAY_ACCT is not set 23 - CONFIG_TASK_XACCT=y 24 - CONFIG_TASK_IO_ACCOUNTING=y 25 - # CONFIG_USER_NS is not set 26 - # CONFIG_AUDIT is not set 27 - # CONFIG_IKCONFIG is not set 28 - CONFIG_LOG_BUF_SHIFT=20 29 - CONFIG_CGROUPS=y 30 - CONFIG_CPUSETS=y 31 - CONFIG_FAIR_GROUP_SCHED=y 32 - CONFIG_FAIR_USER_SCHED=y 33 - CONFIG_SYSFS_DEPRECATED=y 34 - CONFIG_RELAY=y 35 - CONFIG_BLK_DEV_INITRD=y 36 - CONFIG_INITRAMFS_SOURCE="" 37 - CONFIG_CC_OPTIMIZE_FOR_SIZE=y 38 - CONFIG_SYSCTL=y 39 - # CONFIG_EMBEDDED is not set 40 - CONFIG_SYSCTL_SYSCALL=y 41 - CONFIG_KALLSYMS=y 42 - CONFIG_KALLSYMS_ALL=y 43 - # CONFIG_KALLSYMS_EXTRA_PASS is not set 44 - CONFIG_HOTPLUG=y 45 - CONFIG_PRINTK=y 46 - CONFIG_BUG=y 47 - CONFIG_ELF_CORE=y 48 - CONFIG_BASE_FULL=y 49 - CONFIG_FUTEX=y 50 - CONFIG_ANON_INODES=y 51 - CONFIG_EPOLL=y 52 - CONFIG_SIGNALFD=y 53 - CONFIG_EVENTFD=y 54 - CONFIG_SHMEM=y 55 - CONFIG_VM_EVENT_COUNTERS=y 56 - CONFIG_SLUB_DEBUG=y 57 - # CONFIG_SLAB is not set 58 - CONFIG_SLUB=y 59 - # CONFIG_SLOB is not set 60 - CONFIG_RT_MUTEXES=y 61 - # CONFIG_TINY_SHMEM is not set 62 - CONFIG_BASE_SMALL=0 63 - CONFIG_MODULES=y 64 - CONFIG_MODULE_UNLOAD=y 65 - # CONFIG_MODULE_FORCE_UNLOAD is not set 66 - # CONFIG_MODVERSIONS is not set 67 - # CONFIG_MODULE_SRCVERSION_ALL is not set 68 - CONFIG_KMOD=y 69 - CONFIG_STOP_MACHINE=y 70 - CONFIG_BLOCK=y 71 - # CONFIG_BLK_DEV_IO_TRACE is not set 72 - CONFIG_BLK_DEV_BSG=y 73 - CONFIG_BLOCK_COMPAT=y 74 - 75 - # 76 - # IO Schedulers 77 - # 78 - CONFIG_IOSCHED_NOOP=y 79 - CONFIG_IOSCHED_AS=y 80 - CONFIG_IOSCHED_DEADLINE=y 81 - CONFIG_IOSCHED_CFQ=y 82 - CONFIG_DEFAULT_AS=y 83 - # CONFIG_DEFAULT_DEADLINE is not set 84 - # CONFIG_DEFAULT_CFQ is not set 85 - # CONFIG_DEFAULT_NOOP is not set 86 - CONFIG_DEFAULT_IOSCHED="anticipatory" 87 - 88 - # 89 - # Processor type and features 90 - # 91 - CONFIG_IA64=y 92 - CONFIG_64BIT=y 93 - CONFIG_QUICKLIST=y 94 - CONFIG_MMU=y 95 - CONFIG_RWSEM_XCHGADD_ALGORITHM=y 96 - # CONFIG_ARCH_HAS_ILOG2_U32 is not set 97 - # CONFIG_ARCH_HAS_ILOG2_U64 is not set 98 - CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y 99 - CONFIG_GENERIC_FIND_NEXT_BIT=y 100 - CONFIG_GENERIC_CALIBRATE_DELAY=y 101 - CONFIG_GENERIC_TIME=y 102 - CONFIG_GENERIC_TIME_VSYSCALL=y 103 - CONFIG_DMI=y 104 - CONFIG_EFI=y 105 - CONFIG_GENERIC_IOMAP=y 106 - CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 107 - CONFIG_IA64_UNCACHED_ALLOCATOR=y 108 - CONFIG_AUDIT_ARCH=y 109 - # CONFIG_IA64_GENERIC is not set 110 - # CONFIG_IA64_DIG is not set 111 - # CONFIG_IA64_HP_ZX1 is not set 112 - # CONFIG_IA64_HP_ZX1_SWIOTLB is not set 113 - CONFIG_IA64_SGI_SN2=y 114 - # CONFIG_IA64_HP_SIM is not set 115 - # CONFIG_ITANIUM is not set 116 - CONFIG_MCKINLEY=y 117 - # CONFIG_IA64_PAGE_SIZE_4KB is not set 118 - # CONFIG_IA64_PAGE_SIZE_8KB is not set 119 - # CONFIG_IA64_PAGE_SIZE_16KB is not set 120 - CONFIG_IA64_PAGE_SIZE_64KB=y 121 - CONFIG_PGTABLE_3=y 122 - # CONFIG_PGTABLE_4 is not set 123 - # CONFIG_HZ_100 is not set 124 - CONFIG_HZ_250=y 125 - # 
CONFIG_HZ_300 is not set 126 - # CONFIG_HZ_1000 is not set 127 - CONFIG_HZ=250 128 - CONFIG_IA64_L1_CACHE_SHIFT=7 129 - # CONFIG_IA64_CYCLONE is not set 130 - CONFIG_IOSAPIC=y 131 - CONFIG_IA64_SGI_SN_XP=m 132 - CONFIG_FORCE_MAX_ZONEORDER=17 133 - CONFIG_SMP=y 134 - CONFIG_NR_CPUS=1024 135 - # CONFIG_HOTPLUG_CPU is not set 136 - CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 137 - CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y 138 - CONFIG_SCHED_SMT=y 139 - CONFIG_PREEMPT_NONE=y 140 - # CONFIG_PREEMPT_VOLUNTARY is not set 141 - # CONFIG_PREEMPT is not set 142 - CONFIG_PREEMPT_BKL=y 143 - CONFIG_SELECT_MEMORY_MODEL=y 144 - # CONFIG_FLATMEM_MANUAL is not set 145 - CONFIG_DISCONTIGMEM_MANUAL=y 146 - # CONFIG_SPARSEMEM_MANUAL is not set 147 - CONFIG_DISCONTIGMEM=y 148 - CONFIG_FLAT_NODE_MEM_MAP=y 149 - CONFIG_NEED_MULTIPLE_NODES=y 150 - # CONFIG_SPARSEMEM_STATIC is not set 151 - CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y 152 - CONFIG_SPLIT_PTLOCK_CPUS=4 153 - CONFIG_MIGRATION=y 154 - CONFIG_RESOURCES_64BIT=y 155 - CONFIG_ZONE_DMA_FLAG=0 156 - CONFIG_NR_QUICK=1 157 - CONFIG_VIRT_TO_BUS=y 158 - CONFIG_ARCH_SELECT_MEMORY_MODEL=y 159 - CONFIG_ARCH_DISCONTIGMEM_ENABLE=y 160 - CONFIG_ARCH_FLATMEM_ENABLE=y 161 - CONFIG_ARCH_SPARSEMEM_ENABLE=y 162 - CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y 163 - CONFIG_NUMA=y 164 - CONFIG_NODES_SHIFT=10 165 - CONFIG_ARCH_POPULATES_NODE_MAP=y 166 - CONFIG_VIRTUAL_MEM_MAP=y 167 - CONFIG_HOLES_IN_ZONE=y 168 - CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y 169 - CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y 170 - CONFIG_IA32_SUPPORT=y 171 - CONFIG_COMPAT=y 172 - CONFIG_COMPAT_FOR_U64_ALIGNMENT=y 173 - CONFIG_IA64_MCA_RECOVERY=y 174 - CONFIG_PERFMON=y 175 - CONFIG_IA64_PALINFO=y 176 - CONFIG_IA64_MC_ERR_INJECT=y 177 - CONFIG_SGI_SN=y 178 - # CONFIG_IA64_ESI is not set 179 - # CONFIG_IA64_HP_AML_NFW is not set 180 - 181 - # 182 - # SN Devices 183 - # 184 - CONFIG_SGI_IOC3=y 185 - 186 - # 187 - # Firmware Drivers 188 - # 189 - CONFIG_EFI_VARS=y 190 - CONFIG_EFI_PCDP=y 191 - CONFIG_DMIID=y 192 - CONFIG_BINFMT_ELF=y 193 - # CONFIG_BINFMT_MISC is not set 194 - 195 - # 196 - # Power management and ACPI 197 - # 198 - CONFIG_PM=y 199 - # CONFIG_PM_LEGACY is not set 200 - # CONFIG_PM_DEBUG is not set 201 - CONFIG_ACPI=y 202 - # CONFIG_ACPI_PROCFS is not set 203 - CONFIG_ACPI_PROC_EVENT=y 204 - # CONFIG_ACPI_BUTTON is not set 205 - # CONFIG_ACPI_FAN is not set 206 - # CONFIG_ACPI_DOCK is not set 207 - # CONFIG_ACPI_PROCESSOR is not set 208 - CONFIG_ACPI_NUMA=y 209 - CONFIG_ACPI_BLACKLIST_YEAR=0 210 - # CONFIG_ACPI_DEBUG is not set 211 - CONFIG_ACPI_EC=y 212 - CONFIG_ACPI_POWER=y 213 - CONFIG_ACPI_SYSTEM=y 214 - # CONFIG_ACPI_CONTAINER is not set 215 - 216 - # 217 - # CPU Frequency scaling 218 - # 219 - # CONFIG_CPU_FREQ is not set 220 - 221 - # 222 - # Bus options (PCI, PCMCIA) 223 - # 224 - CONFIG_PCI=y 225 - CONFIG_PCI_DOMAINS=y 226 - CONFIG_PCI_SYSCALL=y 227 - CONFIG_PCIEPORTBUS=y 228 - CONFIG_HOTPLUG_PCI_PCIE=y 229 - CONFIG_PCIEAER=y 230 - CONFIG_ARCH_SUPPORTS_MSI=y 231 - # CONFIG_PCI_MSI is not set 232 - # CONFIG_PCI_DEBUG is not set 233 - CONFIG_HOTPLUG_PCI=y 234 - # CONFIG_HOTPLUG_PCI_FAKE is not set 235 - # CONFIG_HOTPLUG_PCI_ACPI is not set 236 - # CONFIG_HOTPLUG_PCI_CPCI is not set 237 - # CONFIG_HOTPLUG_PCI_SHPC is not set 238 - CONFIG_HOTPLUG_PCI_SGI=y 239 - # CONFIG_PCCARD is not set 240 - 241 - # 242 - # Networking 243 - # 244 - CONFIG_NET=y 245 - 246 - # 247 - # Networking options 248 - # 249 - CONFIG_PACKET=y 250 - CONFIG_PACKET_MMAP=y 251 - CONFIG_UNIX=y 252 - CONFIG_XFRM=y 253 - # CONFIG_XFRM_USER is not set 254 - # 
CONFIG_XFRM_SUB_POLICY is not set 255 - # CONFIG_XFRM_MIGRATE is not set 256 - # CONFIG_NET_KEY is not set 257 - CONFIG_INET=y 258 - CONFIG_IP_MULTICAST=y 259 - # CONFIG_IP_ADVANCED_ROUTER is not set 260 - CONFIG_IP_FIB_HASH=y 261 - # CONFIG_IP_PNP is not set 262 - # CONFIG_NET_IPIP is not set 263 - # CONFIG_NET_IPGRE is not set 264 - # CONFIG_IP_MROUTE is not set 265 - # CONFIG_ARPD is not set 266 - CONFIG_SYN_COOKIES=y 267 - # CONFIG_INET_AH is not set 268 - # CONFIG_INET_ESP is not set 269 - # CONFIG_INET_IPCOMP is not set 270 - # CONFIG_INET_XFRM_TUNNEL is not set 271 - CONFIG_INET_TUNNEL=m 272 - CONFIG_INET_XFRM_MODE_TRANSPORT=y 273 - CONFIG_INET_XFRM_MODE_TUNNEL=y 274 - CONFIG_INET_XFRM_MODE_BEET=y 275 - CONFIG_INET_LRO=y 276 - CONFIG_INET_DIAG=m 277 - CONFIG_INET_TCP_DIAG=m 278 - # CONFIG_TCP_CONG_ADVANCED is not set 279 - CONFIG_TCP_CONG_CUBIC=y 280 - CONFIG_DEFAULT_TCP_CONG="cubic" 281 - # CONFIG_TCP_MD5SIG is not set 282 - CONFIG_IPV6=m 283 - # CONFIG_IPV6_PRIVACY is not set 284 - # CONFIG_IPV6_ROUTER_PREF is not set 285 - # CONFIG_IPV6_OPTIMISTIC_DAD is not set 286 - # CONFIG_INET6_AH is not set 287 - # CONFIG_INET6_ESP is not set 288 - # CONFIG_INET6_IPCOMP is not set 289 - # CONFIG_IPV6_MIP6 is not set 290 - # CONFIG_INET6_XFRM_TUNNEL is not set 291 - # CONFIG_INET6_TUNNEL is not set 292 - CONFIG_INET6_XFRM_MODE_TRANSPORT=m 293 - CONFIG_INET6_XFRM_MODE_TUNNEL=m 294 - CONFIG_INET6_XFRM_MODE_BEET=m 295 - # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 296 - CONFIG_IPV6_SIT=m 297 - # CONFIG_IPV6_TUNNEL is not set 298 - # CONFIG_IPV6_MULTIPLE_TABLES is not set 299 - # CONFIG_NETWORK_SECMARK is not set 300 - # CONFIG_NETFILTER is not set 301 - # CONFIG_IP_DCCP is not set 302 - # CONFIG_IP_SCTP is not set 303 - # CONFIG_TIPC is not set 304 - # CONFIG_ATM is not set 305 - # CONFIG_BRIDGE is not set 306 - # CONFIG_VLAN_8021Q is not set 307 - # CONFIG_DECNET is not set 308 - # CONFIG_LLC2 is not set 309 - # CONFIG_IPX is not set 310 - # CONFIG_ATALK is not set 311 - # CONFIG_X25 is not set 312 - # CONFIG_LAPB is not set 313 - # CONFIG_ECONET is not set 314 - # CONFIG_WAN_ROUTER is not set 315 - 316 - # 317 - # QoS and/or fair queueing 318 - # 319 - # CONFIG_NET_SCHED is not set 320 - 321 - # 322 - # Network testing 323 - # 324 - # CONFIG_NET_PKTGEN is not set 325 - # CONFIG_HAMRADIO is not set 326 - # CONFIG_IRDA is not set 327 - # CONFIG_BT is not set 328 - # CONFIG_AF_RXRPC is not set 329 - 330 - # 331 - # Wireless 332 - # 333 - # CONFIG_CFG80211 is not set 334 - # CONFIG_WIRELESS_EXT is not set 335 - # CONFIG_MAC80211 is not set 336 - # CONFIG_IEEE80211 is not set 337 - # CONFIG_RFKILL is not set 338 - # CONFIG_NET_9P is not set 339 - 340 - # 341 - # Device Drivers 342 - # 343 - 344 - # 345 - # Generic Driver Options 346 - # 347 - CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 348 - CONFIG_STANDALONE=y 349 - CONFIG_PREVENT_FIRMWARE_BUILD=y 350 - CONFIG_FW_LOADER=y 351 - # CONFIG_DEBUG_DRIVER is not set 352 - # CONFIG_DEBUG_DEVRES is not set 353 - # CONFIG_SYS_HYPERVISOR is not set 354 - # CONFIG_CONNECTOR is not set 355 - # CONFIG_MTD is not set 356 - # CONFIG_PARPORT is not set 357 - CONFIG_PNP=y 358 - # CONFIG_PNP_DEBUG is not set 359 - 360 - # 361 - # Protocols 362 - # 363 - CONFIG_PNPACPI=y 364 - CONFIG_BLK_DEV=y 365 - # CONFIG_BLK_CPQ_DA is not set 366 - # CONFIG_BLK_CPQ_CISS_DA is not set 367 - # CONFIG_BLK_DEV_DAC960 is not set 368 - # CONFIG_BLK_DEV_UMEM is not set 369 - # CONFIG_BLK_DEV_COW_COMMON is not set 370 - CONFIG_BLK_DEV_LOOP=y 371 - CONFIG_BLK_DEV_CRYPTOLOOP=m 372 
- CONFIG_BLK_DEV_NBD=m 373 - # CONFIG_BLK_DEV_SX8 is not set 374 - # CONFIG_BLK_DEV_UB is not set 375 - CONFIG_BLK_DEV_RAM=y 376 - CONFIG_BLK_DEV_RAM_COUNT=16 377 - CONFIG_BLK_DEV_RAM_SIZE=4096 378 - CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 379 - # CONFIG_CDROM_PKTCDVD is not set 380 - CONFIG_ATA_OVER_ETH=m 381 - CONFIG_MISC_DEVICES=y 382 - # CONFIG_PHANTOM is not set 383 - # CONFIG_EEPROM_93CX6 is not set 384 - CONFIG_SGI_IOC4=y 385 - # CONFIG_TIFM_CORE is not set 386 - CONFIG_IDE=y 387 - CONFIG_IDE_MAX_HWIFS=4 388 - CONFIG_BLK_DEV_IDE=y 389 - 390 - # 391 - # Please see Documentation/ide.txt for help/info on IDE drives 392 - # 393 - # CONFIG_BLK_DEV_IDE_SATA is not set 394 - CONFIG_BLK_DEV_IDEDISK=y 395 - # CONFIG_IDEDISK_MULTI_MODE is not set 396 - CONFIG_BLK_DEV_IDECD=y 397 - # CONFIG_BLK_DEV_IDETAPE is not set 398 - # CONFIG_BLK_DEV_IDEFLOPPY is not set 399 - # CONFIG_BLK_DEV_IDESCSI is not set 400 - # CONFIG_BLK_DEV_IDEACPI is not set 401 - # CONFIG_IDE_TASK_IOCTL is not set 402 - CONFIG_IDE_PROC_FS=y 403 - 404 - # 405 - # IDE chipset support/bugfixes 406 - # 407 - CONFIG_IDE_GENERIC=y 408 - # CONFIG_BLK_DEV_PLATFORM is not set 409 - # CONFIG_BLK_DEV_IDEPNP is not set 410 - 411 - # 412 - # PCI IDE chipsets support 413 - # 414 - CONFIG_BLK_DEV_IDEPCI=y 415 - CONFIG_IDEPCI_SHARE_IRQ=y 416 - CONFIG_IDEPCI_PCIBUS_ORDER=y 417 - # CONFIG_BLK_DEV_OFFBOARD is not set 418 - # CONFIG_BLK_DEV_GENERIC is not set 419 - # CONFIG_BLK_DEV_OPTI621 is not set 420 - CONFIG_BLK_DEV_IDEDMA_PCI=y 421 - # CONFIG_BLK_DEV_AEC62XX is not set 422 - # CONFIG_BLK_DEV_ALI15X3 is not set 423 - # CONFIG_BLK_DEV_AMD74XX is not set 424 - # CONFIG_BLK_DEV_CMD64X is not set 425 - # CONFIG_BLK_DEV_TRIFLEX is not set 426 - # CONFIG_BLK_DEV_CY82C693 is not set 427 - # CONFIG_BLK_DEV_CS5520 is not set 428 - # CONFIG_BLK_DEV_CS5530 is not set 429 - # CONFIG_BLK_DEV_HPT34X is not set 430 - # CONFIG_BLK_DEV_HPT366 is not set 431 - # CONFIG_BLK_DEV_JMICRON is not set 432 - # CONFIG_BLK_DEV_SC1200 is not set 433 - # CONFIG_BLK_DEV_PIIX is not set 434 - # CONFIG_BLK_DEV_IT8213 is not set 435 - # CONFIG_BLK_DEV_IT821X is not set 436 - # CONFIG_BLK_DEV_NS87415 is not set 437 - # CONFIG_BLK_DEV_PDC202XX_OLD is not set 438 - # CONFIG_BLK_DEV_PDC202XX_NEW is not set 439 - # CONFIG_BLK_DEV_SVWKS is not set 440 - CONFIG_BLK_DEV_SGIIOC4=y 441 - # CONFIG_BLK_DEV_SIIMAGE is not set 442 - # CONFIG_BLK_DEV_SLC90E66 is not set 443 - # CONFIG_BLK_DEV_TRM290 is not set 444 - # CONFIG_BLK_DEV_VIA82CXXX is not set 445 - # CONFIG_BLK_DEV_TC86C001 is not set 446 - # CONFIG_IDE_ARM is not set 447 - CONFIG_BLK_DEV_IDEDMA=y 448 - # CONFIG_BLK_DEV_HD is not set 449 - 450 - # 451 - # SCSI device support 452 - # 453 - # CONFIG_RAID_ATTRS is not set 454 - CONFIG_SCSI=y 455 - CONFIG_SCSI_DMA=y 456 - # CONFIG_SCSI_TGT is not set 457 - CONFIG_SCSI_NETLINK=y 458 - CONFIG_SCSI_PROC_FS=y 459 - 460 - # 461 - # SCSI support type (disk, tape, CD-ROM) 462 - # 463 - CONFIG_BLK_DEV_SD=y 464 - CONFIG_CHR_DEV_ST=m 465 - # CONFIG_CHR_DEV_OSST is not set 466 - CONFIG_BLK_DEV_SR=m 467 - # CONFIG_BLK_DEV_SR_VENDOR is not set 468 - CONFIG_CHR_DEV_SG=m 469 - CONFIG_CHR_DEV_SCH=m 470 - 471 - # 472 - # Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs 473 - # 474 - # CONFIG_SCSI_MULTI_LUN is not set 475 - CONFIG_SCSI_CONSTANTS=y 476 - # CONFIG_SCSI_LOGGING is not set 477 - # CONFIG_SCSI_SCAN_ASYNC is not set 478 - CONFIG_SCSI_WAIT_SCAN=m 479 - 480 - # 481 - # SCSI Transports 482 - # 483 - CONFIG_SCSI_SPI_ATTRS=y 484 - CONFIG_SCSI_FC_ATTRS=y 485 - CONFIG_SCSI_ISCSI_ATTRS=m 486 - CONFIG_SCSI_SAS_ATTRS=y 487 - CONFIG_SCSI_SAS_LIBSAS=y 488 - # CONFIG_SCSI_SAS_ATA is not set 489 - # CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set 490 - CONFIG_SCSI_SRP_ATTRS=y 491 - CONFIG_SCSI_LOWLEVEL=y 492 - CONFIG_ISCSI_TCP=m 493 - # CONFIG_BLK_DEV_3W_XXXX_RAID is not set 494 - # CONFIG_SCSI_3W_9XXX is not set 495 - # CONFIG_SCSI_ACARD is not set 496 - # CONFIG_SCSI_AACRAID is not set 497 - # CONFIG_SCSI_AIC7XXX is not set 498 - # CONFIG_SCSI_AIC7XXX_OLD is not set 499 - # CONFIG_SCSI_AIC79XX is not set 500 - # CONFIG_SCSI_AIC94XX is not set 501 - # CONFIG_SCSI_ADVANSYS is not set 502 - # CONFIG_SCSI_ARCMSR is not set 503 - # CONFIG_MEGARAID_NEWGEN is not set 504 - # CONFIG_MEGARAID_LEGACY is not set 505 - # CONFIG_MEGARAID_SAS is not set 506 - # CONFIG_SCSI_HPTIOP is not set 507 - # CONFIG_SCSI_DMX3191D is not set 508 - # CONFIG_SCSI_FUTURE_DOMAIN is not set 509 - # CONFIG_SCSI_IPS is not set 510 - # CONFIG_SCSI_INITIO is not set 511 - # CONFIG_SCSI_INIA100 is not set 512 - # CONFIG_SCSI_STEX is not set 513 - # CONFIG_SCSI_SYM53C8XX_2 is not set 514 - # CONFIG_SCSI_IPR is not set 515 - CONFIG_SCSI_QLOGIC_1280=y 516 - CONFIG_SCSI_QLA_FC=y 517 - # CONFIG_SCSI_QLA_ISCSI is not set 518 - # CONFIG_SCSI_LPFC is not set 519 - # CONFIG_SCSI_DC395x is not set 520 - # CONFIG_SCSI_DC390T is not set 521 - # CONFIG_SCSI_DEBUG is not set 522 - # CONFIG_SCSI_SRP is not set 523 - CONFIG_ATA=y 524 - CONFIG_ATA_NONSTANDARD=y 525 - CONFIG_ATA_ACPI=y 526 - # CONFIG_SATA_AHCI is not set 527 - # CONFIG_SATA_SVW is not set 528 - # CONFIG_ATA_PIIX is not set 529 - # CONFIG_SATA_MV is not set 530 - # CONFIG_SATA_NV is not set 531 - # CONFIG_PDC_ADMA is not set 532 - # CONFIG_SATA_QSTOR is not set 533 - # CONFIG_SATA_PROMISE is not set 534 - # CONFIG_SATA_SX4 is not set 535 - # CONFIG_SATA_SIL is not set 536 - # CONFIG_SATA_SIL24 is not set 537 - # CONFIG_SATA_SIS is not set 538 - # CONFIG_SATA_ULI is not set 539 - # CONFIG_SATA_VIA is not set 540 - CONFIG_SATA_VITESSE=y 541 - # CONFIG_SATA_INIC162X is not set 542 - # CONFIG_PATA_ACPI is not set 543 - # CONFIG_PATA_ALI is not set 544 - # CONFIG_PATA_AMD is not set 545 - # CONFIG_PATA_ARTOP is not set 546 - # CONFIG_PATA_ATIIXP is not set 547 - # CONFIG_PATA_CMD640_PCI is not set 548 - # CONFIG_PATA_CMD64X is not set 549 - # CONFIG_PATA_CS5520 is not set 550 - # CONFIG_PATA_CS5530 is not set 551 - # CONFIG_PATA_CYPRESS is not set 552 - # CONFIG_PATA_EFAR is not set 553 - # CONFIG_ATA_GENERIC is not set 554 - # CONFIG_PATA_HPT366 is not set 555 - # CONFIG_PATA_HPT37X is not set 556 - # CONFIG_PATA_HPT3X2N is not set 557 - # CONFIG_PATA_HPT3X3 is not set 558 - # CONFIG_PATA_IT821X is not set 559 - # CONFIG_PATA_IT8213 is not set 560 - # CONFIG_PATA_JMICRON is not set 561 - # CONFIG_PATA_TRIFLEX is not set 562 - # CONFIG_PATA_MARVELL is not set 563 - # CONFIG_PATA_MPIIX is not set 564 - # CONFIG_PATA_OLDPIIX is not set 565 - # CONFIG_PATA_NETCELL is not set 566 - # CONFIG_PATA_NS87410 is not set 567 - # CONFIG_PATA_NS87415 is not set 568 - # CONFIG_PATA_OPTI is not set 569 - # CONFIG_PATA_OPTIDMA is not set 570 - # CONFIG_PATA_PDC_OLD is not set 571 - # CONFIG_PATA_RADISYS is not set 572 - # 
CONFIG_PATA_RZ1000 is not set 573 - # CONFIG_PATA_SC1200 is not set 574 - # CONFIG_PATA_SERVERWORKS is not set 575 - # CONFIG_PATA_PDC2027X is not set 576 - # CONFIG_PATA_SIL680 is not set 577 - # CONFIG_PATA_SIS is not set 578 - # CONFIG_PATA_VIA is not set 579 - # CONFIG_PATA_WINBOND is not set 580 - CONFIG_MD=y 581 - CONFIG_BLK_DEV_MD=y 582 - CONFIG_MD_LINEAR=y 583 - CONFIG_MD_RAID0=y 584 - CONFIG_MD_RAID1=y 585 - # CONFIG_MD_RAID10 is not set 586 - CONFIG_MD_RAID456=y 587 - # CONFIG_MD_RAID5_RESHAPE is not set 588 - CONFIG_MD_MULTIPATH=y 589 - # CONFIG_MD_FAULTY is not set 590 - CONFIG_BLK_DEV_DM=y 591 - # CONFIG_DM_DEBUG is not set 592 - CONFIG_DM_CRYPT=m 593 - CONFIG_DM_SNAPSHOT=m 594 - CONFIG_DM_MIRROR=m 595 - CONFIG_DM_ZERO=m 596 - CONFIG_DM_MULTIPATH=m 597 - CONFIG_DM_MULTIPATH_EMC=m 598 - # CONFIG_DM_MULTIPATH_RDAC is not set 599 - # CONFIG_DM_DELAY is not set 600 - CONFIG_FUSION=y 601 - CONFIG_FUSION_SPI=y 602 - CONFIG_FUSION_FC=y 603 - CONFIG_FUSION_SAS=y 604 - CONFIG_FUSION_MAX_SGE=128 605 - CONFIG_FUSION_CTL=m 606 - CONFIG_FUSION_LOGGING=y 607 - 608 - # 609 - # IEEE 1394 (FireWire) support 610 - # 611 - # CONFIG_FIREWIRE is not set 612 - # CONFIG_IEEE1394 is not set 613 - # CONFIG_I2O is not set 614 - CONFIG_NETDEVICES=y 615 - # CONFIG_NETDEVICES_MULTIQUEUE is not set 616 - # CONFIG_DUMMY is not set 617 - # CONFIG_BONDING is not set 618 - # CONFIG_MACVLAN is not set 619 - # CONFIG_EQUALIZER is not set 620 - # CONFIG_TUN is not set 621 - # CONFIG_VETH is not set 622 - # CONFIG_NET_SB1000 is not set 623 - # CONFIG_IP1000 is not set 624 - # CONFIG_ARCNET is not set 625 - # CONFIG_NET_ETHERNET is not set 626 - CONFIG_NETDEV_1000=y 627 - # CONFIG_ACENIC is not set 628 - # CONFIG_DL2K is not set 629 - # CONFIG_E1000 is not set 630 - # CONFIG_E1000E is not set 631 - # CONFIG_NS83820 is not set 632 - # CONFIG_HAMACHI is not set 633 - # CONFIG_YELLOWFIN is not set 634 - # CONFIG_R8169 is not set 635 - # CONFIG_SIS190 is not set 636 - # CONFIG_SKGE is not set 637 - # CONFIG_SKY2 is not set 638 - # CONFIG_SK98LIN is not set 639 - # CONFIG_VIA_VELOCITY is not set 640 - CONFIG_TIGON3=y 641 - # CONFIG_BNX2 is not set 642 - # CONFIG_QLA3XXX is not set 643 - # CONFIG_ATL1 is not set 644 - CONFIG_NETDEV_10000=y 645 - CONFIG_CHELSIO_T1=m 646 - CONFIG_CHELSIO_T1_1G=y 647 - # CONFIG_CHELSIO_T1_NAPI is not set 648 - CONFIG_CHELSIO_T3=m 649 - CONFIG_IXGBE=m 650 - # CONFIG_IXGB is not set 651 - CONFIG_S2IO=m 652 - # CONFIG_S2IO_NAPI is not set 653 - # CONFIG_MYRI10GE is not set 654 - # CONFIG_NETXEN_NIC is not set 655 - # CONFIG_NIU is not set 656 - # CONFIG_MLX4_CORE is not set 657 - # CONFIG_TEHUTI is not set 658 - # CONFIG_TR is not set 659 - 660 - # 661 - # Wireless LAN 662 - # 663 - # CONFIG_WLAN_PRE80211 is not set 664 - # CONFIG_WLAN_80211 is not set 665 - 666 - # 667 - # USB Network Adapters 668 - # 669 - # CONFIG_USB_CATC is not set 670 - # CONFIG_USB_KAWETH is not set 671 - # CONFIG_USB_PEGASUS is not set 672 - # CONFIG_USB_RTL8150 is not set 673 - # CONFIG_USB_USBNET_MII is not set 674 - # CONFIG_USB_USBNET is not set 675 - # CONFIG_WAN is not set 676 - # CONFIG_FDDI is not set 677 - # CONFIG_HIPPI is not set 678 - # CONFIG_PPP is not set 679 - # CONFIG_SLIP is not set 680 - # CONFIG_NET_FC is not set 681 - # CONFIG_SHAPER is not set 682 - CONFIG_NETCONSOLE=y 683 - # CONFIG_NETCONSOLE_DYNAMIC is not set 684 - CONFIG_NETPOLL=y 685 - # CONFIG_NETPOLL_TRAP is not set 686 - CONFIG_NET_POLL_CONTROLLER=y 687 - # CONFIG_ISDN is not set 688 - # CONFIG_PHONE is not set 689 - 690 - # 691 - # Input 
device support 692 - # 693 - CONFIG_INPUT=y 694 - # CONFIG_INPUT_FF_MEMLESS is not set 695 - # CONFIG_INPUT_POLLDEV is not set 696 - 697 - # 698 - # Userland interfaces 699 - # 700 - CONFIG_INPUT_MOUSEDEV=y 701 - # CONFIG_INPUT_MOUSEDEV_PSAUX is not set 702 - CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 703 - CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 704 - # CONFIG_INPUT_JOYDEV is not set 705 - # CONFIG_INPUT_EVDEV is not set 706 - # CONFIG_INPUT_EVBUG is not set 707 - 708 - # 709 - # Input Device Drivers 710 - # 711 - # CONFIG_INPUT_KEYBOARD is not set 712 - # CONFIG_INPUT_MOUSE is not set 713 - # CONFIG_INPUT_JOYSTICK is not set 714 - # CONFIG_INPUT_TABLET is not set 715 - # CONFIG_INPUT_TOUCHSCREEN is not set 716 - # CONFIG_INPUT_MISC is not set 717 - 718 - # 719 - # Hardware I/O ports 720 - # 721 - # CONFIG_SERIO is not set 722 - # CONFIG_GAMEPORT is not set 723 - 724 - # 725 - # Character devices 726 - # 727 - CONFIG_VT=y 728 - # CONFIG_VT_UNICODE is not set 729 - CONFIG_VT_CONSOLE=y 730 - CONFIG_HW_CONSOLE=y 731 - # CONFIG_VT_HW_CONSOLE_BINDING is not set 732 - CONFIG_SERIAL_NONSTANDARD=y 733 - # CONFIG_COMPUTONE is not set 734 - # CONFIG_ROCKETPORT is not set 735 - # CONFIG_CYCLADES is not set 736 - # CONFIG_DIGIEPCA is not set 737 - # CONFIG_MOXA_INTELLIO is not set 738 - # CONFIG_MOXA_SMARTIO is not set 739 - # CONFIG_MOXA_SMARTIO_NEW is not set 740 - # CONFIG_ISI is not set 741 - # CONFIG_SYNCLINKMP is not set 742 - # CONFIG_SYNCLINK_GT is not set 743 - # CONFIG_N_HDLC is not set 744 - # CONFIG_SPECIALIX is not set 745 - # CONFIG_SX is not set 746 - # CONFIG_RIO is not set 747 - # CONFIG_STALDRV is not set 748 - CONFIG_SGI_SNSC=y 749 - CONFIG_SGI_TIOCX=y 750 - CONFIG_SGI_MBCS=m 751 - 752 - # 753 - # Serial drivers 754 - # 755 - # CONFIG_SERIAL_8250 is not set 756 - 757 - # 758 - # Non-8250 serial port support 759 - # 760 - CONFIG_SERIAL_CORE=y 761 - CONFIG_SERIAL_CORE_CONSOLE=y 762 - CONFIG_SERIAL_SGI_L1_CONSOLE=y 763 - # CONFIG_SERIAL_JSM is not set 764 - CONFIG_SERIAL_SGI_IOC4=y 765 - CONFIG_SERIAL_SGI_IOC3=y 766 - CONFIG_UNIX98_PTYS=y 767 - CONFIG_LEGACY_PTYS=y 768 - CONFIG_LEGACY_PTY_COUNT=256 769 - # CONFIG_IPMI_HANDLER is not set 770 - # CONFIG_WATCHDOG is not set 771 - # CONFIG_HW_RANDOM is not set 772 - CONFIG_EFI_RTC=y 773 - # CONFIG_R3964 is not set 774 - # CONFIG_APPLICOM is not set 775 - CONFIG_RAW_DRIVER=m 776 - CONFIG_MAX_RAW_DEVS=256 777 - # CONFIG_HPET is not set 778 - # CONFIG_HANGCHECK_TIMER is not set 779 - CONFIG_MMTIMER=y 780 - # CONFIG_TCG_TPM is not set 781 - CONFIG_DEVPORT=y 782 - # CONFIG_I2C is not set 783 - 784 - # 785 - # SPI support 786 - # 787 - # CONFIG_SPI is not set 788 - # CONFIG_SPI_MASTER is not set 789 - # CONFIG_W1 is not set 790 - # CONFIG_POWER_SUPPLY is not set 791 - # CONFIG_HWMON is not set 792 - 793 - # 794 - # Sonics Silicon Backplane 795 - # 796 - CONFIG_SSB_POSSIBLE=y 797 - # CONFIG_SSB is not set 798 - 799 - # 800 - # Multifunction device drivers 801 - # 802 - # CONFIG_MFD_SM501 is not set 803 - 804 - # 805 - # Multimedia devices 806 - # 807 - # CONFIG_VIDEO_DEV is not set 808 - # CONFIG_DVB_CORE is not set 809 - # CONFIG_DAB is not set 810 - 811 - # 812 - # Graphics support 813 - # 814 - CONFIG_AGP=y 815 - CONFIG_AGP_SGI_TIOCA=y 816 - # CONFIG_DRM is not set 817 - # CONFIG_VGASTATE is not set 818 - CONFIG_VIDEO_OUTPUT_CONTROL=m 819 - # CONFIG_FB is not set 820 - # CONFIG_BACKLIGHT_LCD_SUPPORT is not set 821 - 822 - # 823 - # Display device support 824 - # 825 - # CONFIG_DISPLAY_SUPPORT is not set 826 - 827 - # 828 - # Console display driver 
support 829 - # 830 - CONFIG_VGA_CONSOLE=y 831 - # CONFIG_VGACON_SOFT_SCROLLBACK is not set 832 - CONFIG_DUMMY_CONSOLE=y 833 - 834 - # 835 - # Sound 836 - # 837 - # CONFIG_SOUND is not set 838 - CONFIG_HID_SUPPORT=y 839 - CONFIG_HID=y 840 - CONFIG_HID_DEBUG=y 841 - # CONFIG_HIDRAW is not set 842 - 843 - # 844 - # USB Input Devices 845 - # 846 - CONFIG_USB_HID=m 847 - # CONFIG_USB_HIDINPUT_POWERBOOK is not set 848 - # CONFIG_HID_FF is not set 849 - # CONFIG_USB_HIDDEV is not set 850 - 851 - # 852 - # USB HID Boot Protocol drivers 853 - # 854 - # CONFIG_USB_KBD is not set 855 - # CONFIG_USB_MOUSE is not set 856 - CONFIG_USB_SUPPORT=y 857 - CONFIG_USB_ARCH_HAS_HCD=y 858 - CONFIG_USB_ARCH_HAS_OHCI=y 859 - CONFIG_USB_ARCH_HAS_EHCI=y 860 - CONFIG_USB=m 861 - # CONFIG_USB_DEBUG is not set 862 - 863 - # 864 - # Miscellaneous USB options 865 - # 866 - # CONFIG_USB_DEVICEFS is not set 867 - CONFIG_USB_DEVICE_CLASS=y 868 - # CONFIG_USB_DYNAMIC_MINORS is not set 869 - # CONFIG_USB_SUSPEND is not set 870 - # CONFIG_USB_PERSIST is not set 871 - # CONFIG_USB_OTG is not set 872 - 873 - # 874 - # USB Host Controller Drivers 875 - # 876 - CONFIG_USB_EHCI_HCD=m 877 - # CONFIG_USB_EHCI_SPLIT_ISO is not set 878 - # CONFIG_USB_EHCI_ROOT_HUB_TT is not set 879 - # CONFIG_USB_EHCI_TT_NEWSCHED is not set 880 - # CONFIG_USB_ISP116X_HCD is not set 881 - CONFIG_USB_OHCI_HCD=m 882 - # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 883 - # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set 884 - CONFIG_USB_OHCI_LITTLE_ENDIAN=y 885 - CONFIG_USB_UHCI_HCD=m 886 - # CONFIG_USB_SL811_HCD is not set 887 - # CONFIG_USB_R8A66597_HCD is not set 888 - 889 - # 890 - # USB Device Class drivers 891 - # 892 - # CONFIG_USB_ACM is not set 893 - # CONFIG_USB_PRINTER is not set 894 - 895 - # 896 - # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 897 - # 898 - 899 - # 900 - # may also be needed; see USB_STORAGE Help for more information 901 - # 902 - # CONFIG_USB_STORAGE is not set 903 - # CONFIG_USB_LIBUSUAL is not set 904 - 905 - # 906 - # USB Imaging devices 907 - # 908 - # CONFIG_USB_MDC800 is not set 909 - # CONFIG_USB_MICROTEK is not set 910 - CONFIG_USB_MON=y 911 - 912 - # 913 - # USB port drivers 914 - # 915 - 916 - # 917 - # USB Serial Converter support 918 - # 919 - # CONFIG_USB_SERIAL is not set 920 - 921 - # 922 - # USB Miscellaneous drivers 923 - # 924 - # CONFIG_USB_EMI62 is not set 925 - # CONFIG_USB_EMI26 is not set 926 - # CONFIG_USB_ADUTUX is not set 927 - # CONFIG_USB_AUERSWALD is not set 928 - # CONFIG_USB_RIO500 is not set 929 - # CONFIG_USB_LEGOTOWER is not set 930 - # CONFIG_USB_LCD is not set 931 - # CONFIG_USB_BERRY_CHARGE is not set 932 - # CONFIG_USB_LED is not set 933 - # CONFIG_USB_CYPRESS_CY7C63 is not set 934 - # CONFIG_USB_CYTHERM is not set 935 - # CONFIG_USB_PHIDGET is not set 936 - # CONFIG_USB_IDMOUSE is not set 937 - # CONFIG_USB_FTDI_ELAN is not set 938 - # CONFIG_USB_APPLEDISPLAY is not set 939 - # CONFIG_USB_SISUSBVGA is not set 940 - # CONFIG_USB_LD is not set 941 - # CONFIG_USB_TRANCEVIBRATOR is not set 942 - # CONFIG_USB_IOWARRIOR is not set 943 - 944 - # 945 - # USB DSL modem support 946 - # 947 - 948 - # 949 - # USB Gadget Support 950 - # 951 - # CONFIG_USB_GADGET is not set 952 - # CONFIG_MMC is not set 953 - # CONFIG_NEW_LEDS is not set 954 - CONFIG_INFINIBAND=m 955 - # CONFIG_INFINIBAND_USER_MAD is not set 956 - CONFIG_INFINIBAND_USER_ACCESS=m 957 - CONFIG_INFINIBAND_USER_MEM=y 958 - CONFIG_INFINIBAND_ADDR_TRANS=y 959 - CONFIG_INFINIBAND_MTHCA=m 960 - CONFIG_INFINIBAND_MTHCA_DEBUG=y 961 - # 
CONFIG_INFINIBAND_AMSO1100 is not set 962 - # CONFIG_INFINIBAND_CXGB3 is not set 963 - # CONFIG_MLX4_INFINIBAND is not set 964 - CONFIG_INFINIBAND_IPOIB=m 965 - # CONFIG_INFINIBAND_IPOIB_CM is not set 966 - CONFIG_INFINIBAND_IPOIB_DEBUG=y 967 - # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set 968 - CONFIG_INFINIBAND_SRP=m 969 - # CONFIG_INFINIBAND_ISER is not set 970 - # CONFIG_RTC_CLASS is not set 971 - 972 - # 973 - # Userspace I/O 974 - # 975 - # CONFIG_UIO is not set 976 - CONFIG_MSPEC=y 977 - 978 - # 979 - # File systems 980 - # 981 - CONFIG_EXT2_FS=y 982 - CONFIG_EXT2_FS_XATTR=y 983 - CONFIG_EXT2_FS_POSIX_ACL=y 984 - CONFIG_EXT2_FS_SECURITY=y 985 - # CONFIG_EXT2_FS_XIP is not set 986 - CONFIG_EXT3_FS=y 987 - CONFIG_EXT3_FS_XATTR=y 988 - CONFIG_EXT3_FS_POSIX_ACL=y 989 - CONFIG_EXT3_FS_SECURITY=y 990 - # CONFIG_EXT4DEV_FS is not set 991 - CONFIG_JBD=y 992 - # CONFIG_JBD_DEBUG is not set 993 - CONFIG_FS_MBCACHE=y 994 - CONFIG_REISERFS_FS=y 995 - # CONFIG_REISERFS_CHECK is not set 996 - # CONFIG_REISERFS_PROC_INFO is not set 997 - CONFIG_REISERFS_FS_XATTR=y 998 - CONFIG_REISERFS_FS_POSIX_ACL=y 999 - CONFIG_REISERFS_FS_SECURITY=y 1000 - # CONFIG_JFS_FS is not set 1001 - CONFIG_FS_POSIX_ACL=y 1002 - CONFIG_XFS_FS=y 1003 - CONFIG_XFS_QUOTA=y 1004 - # CONFIG_XFS_SECURITY is not set 1005 - CONFIG_XFS_POSIX_ACL=y 1006 - CONFIG_XFS_RT=y 1007 - # CONFIG_GFS2_FS is not set 1008 - # CONFIG_OCFS2_FS is not set 1009 - # CONFIG_MINIX_FS is not set 1010 - # CONFIG_ROMFS_FS is not set 1011 - CONFIG_INOTIFY=y 1012 - CONFIG_INOTIFY_USER=y 1013 - CONFIG_QUOTA=y 1014 - CONFIG_QUOTA_NETLINK_INTERFACE=y 1015 - CONFIG_PRINT_QUOTA_WARNING=y 1016 - # CONFIG_QFMT_V1 is not set 1017 - # CONFIG_QFMT_V2 is not set 1018 - CONFIG_QUOTACTL=y 1019 - CONFIG_DNOTIFY=y 1020 - CONFIG_AUTOFS_FS=m 1021 - CONFIG_AUTOFS4_FS=m 1022 - CONFIG_FUSE_FS=m 1023 - 1024 - # 1025 - # CD-ROM/DVD Filesystems 1026 - # 1027 - CONFIG_ISO9660_FS=y 1028 - CONFIG_JOLIET=y 1029 - # CONFIG_ZISOFS is not set 1030 - CONFIG_UDF_FS=m 1031 - CONFIG_UDF_NLS=y 1032 - 1033 - # 1034 - # DOS/FAT/NT Filesystems 1035 - # 1036 - CONFIG_FAT_FS=y 1037 - # CONFIG_MSDOS_FS is not set 1038 - CONFIG_VFAT_FS=y 1039 - CONFIG_FAT_DEFAULT_CODEPAGE=437 1040 - CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" 1041 - # CONFIG_NTFS_FS is not set 1042 - 1043 - # 1044 - # Pseudo filesystems 1045 - # 1046 - CONFIG_PROC_FS=y 1047 - CONFIG_PROC_KCORE=y 1048 - CONFIG_PROC_SYSCTL=y 1049 - CONFIG_SYSFS=y 1050 - CONFIG_TMPFS=y 1051 - # CONFIG_TMPFS_POSIX_ACL is not set 1052 - CONFIG_HUGETLBFS=y 1053 - CONFIG_HUGETLB_PAGE=y 1054 - # CONFIG_CONFIGFS_FS is not set 1055 - 1056 - # 1057 - # Miscellaneous filesystems 1058 - # 1059 - # CONFIG_ADFS_FS is not set 1060 - # CONFIG_AFFS_FS is not set 1061 - # CONFIG_HFS_FS is not set 1062 - # CONFIG_HFSPLUS_FS is not set 1063 - # CONFIG_BEFS_FS is not set 1064 - # CONFIG_BFS_FS is not set 1065 - # CONFIG_EFS_FS is not set 1066 - # CONFIG_CRAMFS is not set 1067 - # CONFIG_VXFS_FS is not set 1068 - # CONFIG_HPFS_FS is not set 1069 - # CONFIG_QNX4FS_FS is not set 1070 - # CONFIG_SYSV_FS is not set 1071 - # CONFIG_UFS_FS is not set 1072 - CONFIG_NETWORK_FILESYSTEMS=y 1073 - CONFIG_NFS_FS=m 1074 - CONFIG_NFS_V3=y 1075 - # CONFIG_NFS_V3_ACL is not set 1076 - CONFIG_NFS_V4=y 1077 - CONFIG_NFS_DIRECTIO=y 1078 - CONFIG_NFSD=m 1079 - CONFIG_NFSD_V3=y 1080 - # CONFIG_NFSD_V3_ACL is not set 1081 - CONFIG_NFSD_V4=y 1082 - CONFIG_NFSD_TCP=y 1083 - CONFIG_LOCKD=m 1084 - CONFIG_LOCKD_V4=y 1085 - CONFIG_EXPORTFS=m 1086 - CONFIG_NFS_COMMON=y 1087 - CONFIG_SUNRPC=m 1088 - 
CONFIG_SUNRPC_GSS=m 1089 - CONFIG_SUNRPC_XPRT_RDMA=m 1090 - # CONFIG_SUNRPC_BIND34 is not set 1091 - CONFIG_RPCSEC_GSS_KRB5=m 1092 - # CONFIG_RPCSEC_GSS_SPKM3 is not set 1093 - CONFIG_SMB_FS=m 1094 - # CONFIG_SMB_NLS_DEFAULT is not set 1095 - CONFIG_CIFS=m 1096 - # CONFIG_CIFS_STATS is not set 1097 - # CONFIG_CIFS_WEAK_PW_HASH is not set 1098 - # CONFIG_CIFS_XATTR is not set 1099 - # CONFIG_CIFS_DEBUG2 is not set 1100 - # CONFIG_CIFS_EXPERIMENTAL is not set 1101 - # CONFIG_NCP_FS is not set 1102 - # CONFIG_CODA_FS is not set 1103 - # CONFIG_AFS_FS is not set 1104 - 1105 - # 1106 - # Partition Types 1107 - # 1108 - CONFIG_PARTITION_ADVANCED=y 1109 - # CONFIG_ACORN_PARTITION is not set 1110 - # CONFIG_OSF_PARTITION is not set 1111 - # CONFIG_AMIGA_PARTITION is not set 1112 - # CONFIG_ATARI_PARTITION is not set 1113 - # CONFIG_MAC_PARTITION is not set 1114 - CONFIG_MSDOS_PARTITION=y 1115 - # CONFIG_BSD_DISKLABEL is not set 1116 - # CONFIG_MINIX_SUBPARTITION is not set 1117 - # CONFIG_SOLARIS_X86_PARTITION is not set 1118 - # CONFIG_UNIXWARE_DISKLABEL is not set 1119 - # CONFIG_LDM_PARTITION is not set 1120 - CONFIG_SGI_PARTITION=y 1121 - # CONFIG_ULTRIX_PARTITION is not set 1122 - # CONFIG_SUN_PARTITION is not set 1123 - # CONFIG_KARMA_PARTITION is not set 1124 - CONFIG_EFI_PARTITION=y 1125 - # CONFIG_SYSV68_PARTITION is not set 1126 - CONFIG_NLS=y 1127 - CONFIG_NLS_DEFAULT="iso8859-1" 1128 - CONFIG_NLS_CODEPAGE_437=y 1129 - # CONFIG_NLS_CODEPAGE_737 is not set 1130 - # CONFIG_NLS_CODEPAGE_775 is not set 1131 - # CONFIG_NLS_CODEPAGE_850 is not set 1132 - # CONFIG_NLS_CODEPAGE_852 is not set 1133 - # CONFIG_NLS_CODEPAGE_855 is not set 1134 - # CONFIG_NLS_CODEPAGE_857 is not set 1135 - # CONFIG_NLS_CODEPAGE_860 is not set 1136 - # CONFIG_NLS_CODEPAGE_861 is not set 1137 - # CONFIG_NLS_CODEPAGE_862 is not set 1138 - # CONFIG_NLS_CODEPAGE_863 is not set 1139 - # CONFIG_NLS_CODEPAGE_864 is not set 1140 - # CONFIG_NLS_CODEPAGE_865 is not set 1141 - # CONFIG_NLS_CODEPAGE_866 is not set 1142 - # CONFIG_NLS_CODEPAGE_869 is not set 1143 - # CONFIG_NLS_CODEPAGE_936 is not set 1144 - # CONFIG_NLS_CODEPAGE_950 is not set 1145 - # CONFIG_NLS_CODEPAGE_932 is not set 1146 - # CONFIG_NLS_CODEPAGE_949 is not set 1147 - # CONFIG_NLS_CODEPAGE_874 is not set 1148 - # CONFIG_NLS_ISO8859_8 is not set 1149 - # CONFIG_NLS_CODEPAGE_1250 is not set 1150 - # CONFIG_NLS_CODEPAGE_1251 is not set 1151 - # CONFIG_NLS_ASCII is not set 1152 - CONFIG_NLS_ISO8859_1=y 1153 - # CONFIG_NLS_ISO8859_2 is not set 1154 - # CONFIG_NLS_ISO8859_3 is not set 1155 - # CONFIG_NLS_ISO8859_4 is not set 1156 - # CONFIG_NLS_ISO8859_5 is not set 1157 - # CONFIG_NLS_ISO8859_6 is not set 1158 - # CONFIG_NLS_ISO8859_7 is not set 1159 - # CONFIG_NLS_ISO8859_9 is not set 1160 - # CONFIG_NLS_ISO8859_13 is not set 1161 - # CONFIG_NLS_ISO8859_14 is not set 1162 - # CONFIG_NLS_ISO8859_15 is not set 1163 - # CONFIG_NLS_KOI8_R is not set 1164 - # CONFIG_NLS_KOI8_U is not set 1165 - CONFIG_NLS_UTF8=y 1166 - # CONFIG_DLM is not set 1167 - 1168 - # 1169 - # Library routines 1170 - # 1171 - CONFIG_BITREVERSE=y 1172 - # CONFIG_CRC_CCITT is not set 1173 - CONFIG_CRC16=m 1174 - # CONFIG_CRC_ITU_T is not set 1175 - CONFIG_CRC32=y 1176 - # CONFIG_CRC7 is not set 1177 - CONFIG_LIBCRC32C=m 1178 - CONFIG_ZLIB_INFLATE=m 1179 - CONFIG_ZLIB_DEFLATE=m 1180 - CONFIG_GENERIC_ALLOCATOR=y 1181 - CONFIG_PLIST=y 1182 - CONFIG_HAS_IOMEM=y 1183 - CONFIG_HAS_IOPORT=y 1184 - CONFIG_HAS_DMA=y 1185 - CONFIG_GENERIC_HARDIRQS=y 1186 - CONFIG_GENERIC_IRQ_PROBE=y 1187 - 
CONFIG_GENERIC_PENDING_IRQ=y 1188 - CONFIG_IRQ_PER_CPU=y 1189 - 1190 - # 1191 - # Instrumentation Support 1192 - # 1193 - # CONFIG_PROFILING is not set 1194 - # CONFIG_KPROBES is not set 1195 - 1196 - # 1197 - # Kernel hacking 1198 - # 1199 - # CONFIG_PRINTK_TIME is not set 1200 - CONFIG_ENABLE_MUST_CHECK=y 1201 - CONFIG_MAGIC_SYSRQ=y 1202 - # CONFIG_UNUSED_SYMBOLS is not set 1203 - # CONFIG_DEBUG_FS is not set 1204 - # CONFIG_HEADERS_CHECK is not set 1205 - CONFIG_DEBUG_KERNEL=y 1206 - # CONFIG_DEBUG_SHIRQ is not set 1207 - CONFIG_DETECT_SOFTLOCKUP=y 1208 - CONFIG_SCHED_DEBUG=y 1209 - # CONFIG_SCHEDSTATS is not set 1210 - # CONFIG_TIMER_STATS is not set 1211 - # CONFIG_SLUB_DEBUG_ON is not set 1212 - # CONFIG_DEBUG_RT_MUTEXES is not set 1213 - # CONFIG_RT_MUTEX_TESTER is not set 1214 - # CONFIG_DEBUG_SPINLOCK is not set 1215 - # CONFIG_DEBUG_MUTEXES is not set 1216 - # CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1217 - # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1218 - # CONFIG_DEBUG_KOBJECT is not set 1219 - CONFIG_DEBUG_INFO=y 1220 - # CONFIG_DEBUG_VM is not set 1221 - # CONFIG_DEBUG_LIST is not set 1222 - CONFIG_FORCED_INLINING=y 1223 - # CONFIG_BOOT_PRINTK_DELAY is not set 1224 - # CONFIG_RCU_TORTURE_TEST is not set 1225 - # CONFIG_FAULT_INJECTION is not set 1226 - CONFIG_IA64_GRANULE_16MB=y 1227 - # CONFIG_IA64_GRANULE_64MB is not set 1228 - # CONFIG_IA64_PRINT_HAZARDS is not set 1229 - # CONFIG_DISABLE_VHPT is not set 1230 - # CONFIG_IA64_DEBUG_CMPXCHG is not set 1231 - # CONFIG_IA64_DEBUG_IRQ is not set 1232 - CONFIG_SYSVIPC_COMPAT=y 1233 - 1234 - # 1235 - # Security options 1236 - # 1237 - # CONFIG_KEYS is not set 1238 - # CONFIG_SECURITY is not set 1239 - # CONFIG_SECURITY_FILE_CAPABILITIES is not set 1240 - CONFIG_XOR_BLOCKS=y 1241 - CONFIG_ASYNC_CORE=y 1242 - CONFIG_ASYNC_MEMCPY=y 1243 - CONFIG_ASYNC_XOR=y 1244 - CONFIG_CRYPTO=y 1245 - CONFIG_CRYPTO_ALGAPI=y 1246 - CONFIG_CRYPTO_BLKCIPHER=m 1247 - CONFIG_CRYPTO_HASH=y 1248 - CONFIG_CRYPTO_MANAGER=y 1249 - CONFIG_CRYPTO_HMAC=y 1250 - # CONFIG_CRYPTO_XCBC is not set 1251 - # CONFIG_CRYPTO_NULL is not set 1252 - # CONFIG_CRYPTO_MD4 is not set 1253 - CONFIG_CRYPTO_MD5=y 1254 - CONFIG_CRYPTO_SHA1=m 1255 - # CONFIG_CRYPTO_SHA256 is not set 1256 - # CONFIG_CRYPTO_SHA512 is not set 1257 - # CONFIG_CRYPTO_WP512 is not set 1258 - # CONFIG_CRYPTO_TGR192 is not set 1259 - # CONFIG_CRYPTO_GF128MUL is not set 1260 - CONFIG_CRYPTO_ECB=m 1261 - CONFIG_CRYPTO_CBC=m 1262 - CONFIG_CRYPTO_PCBC=m 1263 - # CONFIG_CRYPTO_LRW is not set 1264 - # CONFIG_CRYPTO_XTS is not set 1265 - # CONFIG_CRYPTO_CRYPTD is not set 1266 - CONFIG_CRYPTO_DES=m 1267 - # CONFIG_CRYPTO_FCRYPT is not set 1268 - # CONFIG_CRYPTO_BLOWFISH is not set 1269 - # CONFIG_CRYPTO_TWOFISH is not set 1270 - # CONFIG_CRYPTO_SERPENT is not set 1271 - # CONFIG_CRYPTO_AES is not set 1272 - # CONFIG_CRYPTO_CAST5 is not set 1273 - # CONFIG_CRYPTO_CAST6 is not set 1274 - # CONFIG_CRYPTO_TEA is not set 1275 - # CONFIG_CRYPTO_ARC4 is not set 1276 - # CONFIG_CRYPTO_KHAZAD is not set 1277 - # CONFIG_CRYPTO_ANUBIS is not set 1278 - # CONFIG_CRYPTO_SEED is not set 1279 - CONFIG_CRYPTO_DEFLATE=m 1280 - # CONFIG_CRYPTO_MICHAEL_MIC is not set 1281 - CONFIG_CRYPTO_CRC32C=m 1282 - # CONFIG_CRYPTO_CAMELLIA is not set 1283 - # CONFIG_CRYPTO_TEST is not set 1284 - # CONFIG_CRYPTO_AUTHENC is not set 1285 - # CONFIG_CRYPTO_HW is not set
+1 -1
arch/ia64/ia32/ia32_entry.S
··· 262 262 data8 sys_uselib 263 263 data8 sys_swapon 264 264 data8 sys_reboot 265 - data8 sys32_readdir 265 + data8 compat_sys_old_readdir 266 266 data8 sys32_mmap /* 90 */ 267 267 data8 sys32_munmap 268 268 data8 sys_truncate
-7
arch/ia64/ia32/ia32priv.h
··· 276 276 } _sifields; 277 277 } compat_siginfo_t; 278 278 279 - struct old_linux32_dirent { 280 - u32 d_ino; 281 - u32 d_offset; 282 - u16 d_namlen; 283 - char d_name[1]; 284 - }; 285 - 286 279 /* 287 280 * IA-32 ELF specific definitions for IA-64. 288 281 */
-132
arch/ia64/ia32/sys_ia32.c
··· 1210 1210 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); 1211 1211 } 1212 1212 1213 - struct getdents32_callback { 1214 - struct compat_dirent __user *current_dir; 1215 - struct compat_dirent __user *previous; 1216 - int count; 1217 - int error; 1218 - }; 1219 - 1220 - struct readdir32_callback { 1221 - struct old_linux32_dirent __user * dirent; 1222 - int count; 1223 - }; 1224 - 1225 - static int 1226 - filldir32 (void *__buf, const char *name, int namlen, loff_t offset, u64 ino, 1227 - unsigned int d_type) 1228 - { 1229 - struct compat_dirent __user * dirent; 1230 - struct getdents32_callback * buf = (struct getdents32_callback *) __buf; 1231 - int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4); 1232 - u32 d_ino; 1233 - 1234 - buf->error = -EINVAL; /* only used if we fail.. */ 1235 - if (reclen > buf->count) 1236 - return -EINVAL; 1237 - d_ino = ino; 1238 - if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) 1239 - return -EOVERFLOW; 1240 - buf->error = -EFAULT; /* only used if we fail.. */ 1241 - dirent = buf->previous; 1242 - if (dirent) 1243 - if (put_user(offset, &dirent->d_off)) 1244 - return -EFAULT; 1245 - dirent = buf->current_dir; 1246 - buf->previous = dirent; 1247 - if (put_user(d_ino, &dirent->d_ino) 1248 - || put_user(reclen, &dirent->d_reclen) 1249 - || copy_to_user(dirent->d_name, name, namlen) 1250 - || put_user(0, dirent->d_name + namlen)) 1251 - return -EFAULT; 1252 - dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen); 1253 - buf->current_dir = dirent; 1254 - buf->count -= reclen; 1255 - return 0; 1256 - } 1257 - 1258 - asmlinkage long 1259 - sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count) 1260 - { 1261 - struct file * file; 1262 - struct compat_dirent __user * lastdirent; 1263 - struct getdents32_callback buf; 1264 - int error; 1265 - 1266 - error = -EFAULT; 1267 - if (!access_ok(VERIFY_WRITE, dirent, count)) 1268 - goto out; 1269 - 1270 - error = -EBADF; 1271 - file = fget(fd); 1272 - if (!file) 1273 - goto out; 1274 - 1275 - buf.current_dir = dirent; 1276 - buf.previous = NULL; 1277 - buf.count = count; 1278 - buf.error = 0; 1279 - 1280 - error = vfs_readdir(file, filldir32, &buf); 1281 - if (error < 0) 1282 - goto out_putf; 1283 - error = buf.error; 1284 - lastdirent = buf.previous; 1285 - if (lastdirent) { 1286 - if (put_user(file->f_pos, &lastdirent->d_off)) 1287 - error = -EFAULT; 1288 - else 1289 - error = count - buf.count; 1290 - } 1291 - 1292 - out_putf: 1293 - fput(file); 1294 - out: 1295 - return error; 1296 - } 1297 - 1298 - static int 1299 - fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, u64 ino, 1300 - unsigned int d_type) 1301 - { 1302 - struct readdir32_callback * buf = (struct readdir32_callback *) __buf; 1303 - struct old_linux32_dirent __user * dirent; 1304 - u32 d_ino; 1305 - 1306 - if (buf->count) 1307 - return -EINVAL; 1308 - d_ino = ino; 1309 - if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) 1310 - return -EOVERFLOW; 1311 - buf->count++; 1312 - dirent = buf->dirent; 1313 - if (put_user(d_ino, &dirent->d_ino) 1314 - || put_user(offset, &dirent->d_offset) 1315 - || put_user(namlen, &dirent->d_namlen) 1316 - || copy_to_user(dirent->d_name, name, namlen) 1317 - || put_user(0, dirent->d_name + namlen)) 1318 - return -EFAULT; 1319 - return 0; 1320 - } 1321 - 1322 - asmlinkage long 1323 - sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count) 1324 - { 1325 - int error; 1326 - struct file * file; 
1327 - struct readdir32_callback buf; 1328 - 1329 - error = -EBADF; 1330 - file = fget(fd); 1331 - if (!file) 1332 - goto out; 1333 - 1334 - buf.count = 0; 1335 - buf.dirent = dirent; 1336 - 1337 - error = vfs_readdir(file, fillonedir32, &buf); 1338 - if (error >= 0) 1339 - error = buf.count; 1340 - fput(file); 1341 - out: 1342 - return error; 1343 - } 1344 - 1345 1213 struct sel_arg_struct { 1346 1214 unsigned int n; 1347 1215 unsigned int inp;
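
With the ia32 syscall table entry switched to compat_sys_old_readdir earlier in this series, the ia64-private getdents/readdir emulation above becomes dead code and is removed. As a reminder of the contract the generic compat path keeps, here is a minimal user-space sketch (not kernel code) of the "one entry per call" behaviour the removed fillonedir32() implemented:

/*
 * User-space sketch of the single-entry contract of the old readdir(2)
 * path: the callback accepts at most one directory entry per syscall
 * and rejects any further ones.
 */
#include <stdio.h>

struct onedir_buf {
    char name[256];
    int count;                 /* entries accepted so far */
};

/* returns 0 if the entry was taken, -1 if the buffer already holds one */
static int fill_one(struct onedir_buf *buf, const char *name)
{
    if (buf->count)
        return -1;             /* old readdir(2) returns a single entry */
    buf->count++;
    snprintf(buf->name, sizeof(buf->name), "%s", name);
    return 0;
}

int main(void)
{
    struct onedir_buf buf = { .count = 0 };
    printf("first:  %d\n", fill_one(&buf, "file-a"));   /* accepted */
    printf("second: %d\n", fill_one(&buf, "file-b"));   /* rejected */
    printf("entry:  %s\n", buf.name);
    return 0;
}
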
+4 -4
arch/ia64/kernel/smp.c
··· 58 58 unsigned int count; 59 59 } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; 60 60 61 - static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; 61 + static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; 62 62 63 63 #define IPI_CALL_FUNC 0 64 64 #define IPI_CPU_STOP 1 ··· 254 254 void 255 255 smp_flush_tlb_cpumask(cpumask_t xcpumask) 256 256 { 257 - unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts); 257 + unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts); 258 258 cpumask_t cpumask = xcpumask; 259 259 int mycpu, cpu, flush_mycpu = 0; 260 260 ··· 262 262 mycpu = smp_processor_id(); 263 263 264 264 for_each_cpu_mask(cpu, cpumask) 265 - counts[cpu] = local_tlb_flush_counts[cpu].count; 265 + counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff; 266 266 267 267 mb(); 268 268 for_each_cpu_mask(cpu, cpumask) { ··· 276 276 smp_local_flush_tlb(); 277 277 278 278 for_each_cpu_mask(cpu, cpumask) 279 - while(counts[cpu] == local_tlb_flush_counts[cpu].count) 279 + while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff)) 280 280 udelay(FLUSH_DELAY); 281 281 282 282 preempt_enable();
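
The hunk above shrinks the per-CPU shadow of the TLB flush counters from 32 to 16 bits and masks the live counter to match. The waiter only needs to observe that the remote counter has moved, so a low-16-bit snapshot is enough as long as the counter cannot advance 65536 steps during the wait. A user-space sketch of that comparison:

/*
 * Sketch, not the kernel code above: comparing only the low 16 bits of
 * a flush counter still detects "the remote CPU has flushed at least
 * once since the snapshot".
 */
#include <stdio.h>

int main(void)
{
    unsigned int count = 0x0001fffe;          /* full 32-bit counter  */
    unsigned short snap = count & 0xffff;     /* 16-bit shadow copy   */

    count++;                                  /* remote CPU flushes   */

    if (snap == (count & 0xffff))
        printf("no flush observed yet\n");
    else
        printf("flush observed (snap=%#x now=%#x)\n", snap, count & 0xffff);
    return 0;
}
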
+12 -11
arch/ia64/pci/pci.c
··· 324 324 struct pci_bus * __devinit 325 325 pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) 326 326 { 327 - struct pci_root_info info; 328 327 struct pci_controller *controller; 329 328 unsigned int windows = 0; 330 329 struct pci_bus *pbus; ··· 345 346 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window, 346 347 &windows); 347 348 if (windows) { 349 + struct pci_root_info info; 350 + 348 351 controller->window = 349 352 kmalloc_node(sizeof(*controller->window) * windows, 350 353 GFP_KERNEL, controller->node); 351 354 if (!controller->window) 352 355 goto out2; 356 + 357 + name = kmalloc(16, GFP_KERNEL); 358 + if (!name) 359 + goto out3; 360 + 361 + sprintf(name, "PCI Bus %04x:%02x", domain, bus); 362 + info.controller = controller; 363 + info.name = name; 364 + acpi_walk_resources(device->handle, METHOD_NAME__CRS, 365 + add_window, &info); 353 366 } 354 - 355 - name = kmalloc(16, GFP_KERNEL); 356 - if (!name) 357 - goto out3; 358 - 359 - sprintf(name, "PCI Bus %04x:%02x", domain, bus); 360 - info.controller = controller; 361 - info.name = name; 362 - acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window, 363 - &info); 364 367 /* 365 368 * See arch/x86/pci/acpi.c. 366 369 * The desired pci bus might already be scanned in a quirk. We
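
The pci_acpi_scan_root() change above scopes the name allocation and the add_window walk under the same "if (windows)" test that allocates controller->window, so nothing consumes a half-initialised pci_root_info when no windows were counted. A small illustrative sketch of the guarded-allocation pattern (the names here are made up, not taken from the patch):

/*
 * Only build and hand out the optional array when the earlier counting
 * pass actually found entries, so callers never see a half-initialised
 * descriptor.
 */
#include <stdio.h>
#include <stdlib.h>

struct controller {
    unsigned int windows;
    unsigned int *window;       /* only valid when windows != 0 */
};

static void add_window(struct controller *c, unsigned int idx, unsigned int value)
{
    c->window[idx] = value;     /* would be a wild write if window == NULL */
}

int main(void)
{
    struct controller c = { .windows = 2, .window = NULL };

    if (c.windows) {                          /* allocate and fill together */
        c.window = calloc(c.windows, sizeof(*c.window));
        if (!c.window)
            return 1;
        for (unsigned int i = 0; i < c.windows; i++)
            add_window(&c, i, 0x100 + i);
    }

    printf("windows=%u first=%#x\n", c.windows, c.windows ? c.window[0] : 0u);
    free(c.window);
    return 0;
}
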
-1
arch/mn10300/kernel/mn10300-serial.c
··· 17 17 #define SUPPORT_SYSRQ 18 18 #endif 19 19 20 - #include <linux/version.h> 21 20 #include <linux/module.h> 22 21 #include <linux/serial.h> 23 22 #include <linux/circ_buf.h>
+1 -42
arch/powerpc/include/asm/ide.h
··· 6 6 #ifndef _ASM_POWERPC_IDE_H 7 7 #define _ASM_POWERPC_IDE_H 8 8 9 - #ifdef __KERNEL__ 10 - 11 - #ifndef __powerpc64__ 12 - #include <linux/sched.h> 13 - #include <asm/mpc8xx.h> 14 - #endif 9 + #include <linux/compiler.h> 15 10 #include <asm/io.h> 16 11 17 12 #define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c)) 18 13 #define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c)) 19 14 #define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c)) 20 15 #define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c)) 21 - 22 - #ifndef __powerpc64__ 23 - #include <linux/ioport.h> 24 - 25 - /* FIXME: use ide_platform host driver */ 26 - static __inline__ int ide_default_irq(unsigned long base) 27 - { 28 - #ifdef CONFIG_PPLUS 29 - switch (base) { 30 - case 0x1f0: return 14; 31 - case 0x170: return 15; 32 - } 33 - #endif 34 - return 0; 35 - } 36 - 37 - /* FIXME: use ide_platform host driver */ 38 - static __inline__ unsigned long ide_default_io_base(int index) 39 - { 40 - #ifdef CONFIG_PPLUS 41 - switch (index) { 42 - case 0: return 0x1f0; 43 - case 1: return 0x170; 44 - } 45 - #endif 46 - return 0; 47 - } 48 - 49 - #ifdef CONFIG_BLK_DEV_MPC8xx_IDE 50 - #define IDE_ARCH_ACK_INTR 1 51 - #define ide_ack_intr(hwif) ((hwif)->ack_intr ? (hwif)->ack_intr(hwif) : 1) 52 - #endif 53 - 54 - #endif /* __powerpc64__ */ 55 - 56 - #endif /* __KERNEL__ */ 57 16 58 17 #endif /* _ASM_POWERPC_IDE_H */
+22 -9
arch/powerpc/kernel/crash_dump.c
··· 86 86 } 87 87 __setup("savemaxmem=", parse_savemaxmem); 88 88 89 + 90 + static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, 91 + unsigned long offset, int userbuf) 92 + { 93 + if (userbuf) { 94 + if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) 95 + return -EFAULT; 96 + } else 97 + memcpy(buf, (vaddr + offset), csize); 98 + 99 + return csize; 100 + } 101 + 89 102 /** 90 103 * copy_oldmem_page - copy one page from "oldmem" 91 104 * @pfn: page frame number to be copied ··· 120 107 if (!csize) 121 108 return 0; 122 109 123 - vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); 110 + csize = min(csize, PAGE_SIZE); 124 111 125 - if (userbuf) { 126 - if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) { 127 - iounmap(vaddr); 128 - return -EFAULT; 129 - } 130 - } else 131 - memcpy(buf, (vaddr + offset), csize); 112 + if (pfn < max_pfn) { 113 + vaddr = __va(pfn << PAGE_SHIFT); 114 + csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); 115 + } else { 116 + vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); 117 + csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); 118 + iounmap(vaddr); 119 + } 132 120 133 - iounmap(vaddr); 134 121 return csize; 135 122 }
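
copy_oldmem_page() now funnels both cases through a helper and only resorts to a temporary mapping for frames outside the linear mapping. A user-space sketch of that dispatch, with map_page()/unmap_page()/direct_map() standing in for __ioremap()/iounmap()/__va():

/*
 * Pages covered by the kernel linear mapping are copied through a
 * cheap direct-map address; everything else goes through a temporary
 * mapping that must be torn down again.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static char fake_memory[4 * PAGE_SIZE];        /* pretend "old" memory */
static unsigned long max_pfn = 2;              /* direct map covers pfn 0..1 */

static void *map_page(unsigned long pfn)   { return &fake_memory[pfn * PAGE_SIZE]; }
static void unmap_page(void *vaddr)        { (void)vaddr; }
static void *direct_map(unsigned long pfn) { return &fake_memory[pfn * PAGE_SIZE]; }

static size_t copy_oldmem(unsigned long pfn, char *buf, size_t csize,
                          unsigned long offset)
{
    void *vaddr;

    if (csize > PAGE_SIZE)
        csize = PAGE_SIZE;

    if (pfn < max_pfn) {
        vaddr = direct_map(pfn);               /* no mapping to undo */
        memcpy(buf, (char *)vaddr + offset, csize);
    } else {
        vaddr = map_page(pfn);                 /* temporary mapping */
        memcpy(buf, (char *)vaddr + offset, csize);
        unmap_page(vaddr);
    }
    return csize;
}

int main(void)
{
    char buf[8];
    memcpy(&fake_memory[3 * PAGE_SIZE], "highmem", 8);
    copy_oldmem(3, buf, sizeof(buf), 0);
    printf("copied: %s\n", buf);
    return 0;
}
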
-12
arch/powerpc/kernel/ibmebus.c
··· 233 233 } 234 234 EXPORT_SYMBOL(ibmebus_free_irq); 235 235 236 - static ssize_t name_show(struct device *dev, 237 - struct device_attribute *attr, char *buf) 238 - { 239 - return sprintf(buf, "%s\n", to_of_device(dev)->node->name); 240 - } 241 - 242 - static struct device_attribute ibmebus_dev_attrs[] = { 243 - __ATTR_RO(name), 244 - __ATTR_NULL 245 - }; 246 - 247 236 static char *ibmebus_chomp(const char *in, size_t count) 248 237 { 249 238 char *out = kmalloc(count + 1, GFP_KERNEL); ··· 316 327 317 328 struct bus_type ibmebus_bus_type = { 318 329 .uevent = of_device_uevent, 319 - .dev_attrs = ibmebus_dev_attrs, 320 330 .bus_attrs = ibmebus_bus_attrs 321 331 }; 322 332 EXPORT_SYMBOL(ibmebus_bus_type);
+1 -1
arch/powerpc/kernel/vio.c
··· 1113 1113 return error; 1114 1114 } 1115 1115 error = viodrv->probe(viodev, id); 1116 - if (error) 1116 + if (error && firmware_has_feature(FW_FEATURE_CMO)) 1117 1117 vio_cmo_bus_remove(viodev); 1118 1118 } 1119 1119
+8 -7
arch/powerpc/platforms/cell/spufs/run.c
··· 206 206 (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); 207 207 if (runcntl == 0) 208 208 runcntl = SPU_RUNCNTL_RUNNABLE; 209 - } 210 - 211 - if (ctx->flags & SPU_CREATE_NOSCHED) { 212 - spuctx_switch_state(ctx, SPU_UTIL_USER); 213 - ctx->ops->runcntl_write(ctx, runcntl); 214 209 } else { 215 210 unsigned long privcntl; 216 211 ··· 214 219 else 215 220 privcntl = SPU_PRIVCNTL_MODE_NORMAL; 216 221 217 - ctx->ops->npc_write(ctx, *npc); 218 222 ctx->ops->privcntl_write(ctx, privcntl); 219 - ctx->ops->runcntl_write(ctx, runcntl); 223 + ctx->ops->npc_write(ctx, *npc); 224 + } 225 + 226 + ctx->ops->runcntl_write(ctx, runcntl); 227 + 228 + if (ctx->flags & SPU_CREATE_NOSCHED) { 229 + spuctx_switch_state(ctx, SPU_UTIL_USER); 230 + } else { 220 231 221 232 if (ctx->state == SPU_STATE_SAVED) { 222 233 ret = spu_activate(ctx, 0);
+9 -2
arch/powerpc/platforms/cell/spufs/sched.c
··· 641 641 642 642 if (tmp && tmp->prio > ctx->prio && 643 643 !(tmp->flags & SPU_CREATE_NOSCHED) && 644 - (!victim || tmp->prio > victim->prio)) 644 + (!victim || tmp->prio > victim->prio)) { 645 645 victim = spu->ctx; 646 + get_spu_context(victim); 647 + } 646 648 } 647 649 mutex_unlock(&cbe_spu_info[node].list_mutex); 648 650 ··· 660 658 * look at another context or give up after X retries. 661 659 */ 662 660 if (!mutex_trylock(&victim->state_mutex)) { 661 + put_spu_context(victim); 663 662 victim = NULL; 664 663 goto restart; 665 664 } ··· 673 670 * restart the search. 674 671 */ 675 672 mutex_unlock(&victim->state_mutex); 673 + put_spu_context(victim); 676 674 victim = NULL; 677 675 goto restart; 678 676 } ··· 691 687 spu_add_to_rq(victim); 692 688 693 689 mutex_unlock(&victim->state_mutex); 690 + put_spu_context(victim); 694 691 695 692 return spu; 696 693 } ··· 990 985 struct spu_context *ctx = spu->ctx; 991 986 992 987 if (ctx) { 988 + get_spu_context(ctx); 993 989 mutex_unlock(mtx); 994 990 spusched_tick(ctx); 995 991 mutex_lock(mtx); 992 + put_spu_context(ctx); 996 993 } 997 994 } 998 995 mutex_unlock(mtx); ··· 1037 1030 node = spu->node; 1038 1031 if (old_state == SPU_UTIL_USER) 1039 1032 atomic_dec(&cbe_spu_info[node].busy_spus); 1040 - if (new_state == SPU_UTIL_USER); 1033 + if (new_state == SPU_UTIL_USER) 1041 1034 atomic_inc(&cbe_spu_info[node].busy_spus); 1042 1035 } 1043 1036 }
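
Besides taking a reference on the victim context wherever it is used outside the list lock, the last hunk also removes a stray semicolon that made the busy_spus increment unconditional. A tiny stand-alone illustration of that class of bug:

/*
 * "if (cond);" ends the statement, so the following line runs
 * unconditionally.
 */
#include <stdio.h>

int main(void)
{
    int busy = 0;
    int new_state = 0;            /* not "user" */

    if (new_state == 1);          /* BUG: empty statement */
        busy++;                   /* runs even though new_state != 1 */

    printf("buggy  busy=%d\n", busy);

    busy = 0;
    if (new_state == 1)           /* fixed: increment is conditional */
        busy++;

    printf("fixed  busy=%d\n", busy);
    return 0;
}
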
-1
arch/powerpc/sysdev/bestcomm/gen_bd.c
··· 11 11 * 12 12 */ 13 13 14 - #include <linux/version.h> 15 14 #include <linux/module.h> 16 15 #include <linux/kernel.h> 17 16 #include <linux/string.h>
+38 -16
arch/s390/defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.26-rc4 4 - # Fri May 30 09:49:33 2008 3 + # Linux kernel version: 2.6.27-rc4 4 + # Thu Aug 21 19:43:29 2008 5 5 # 6 6 CONFIG_SCHED_MC=y 7 7 CONFIG_MMU=y ··· 68 68 CONFIG_SYSCTL=y 69 69 # CONFIG_EMBEDDED is not set 70 70 CONFIG_SYSCTL_SYSCALL=y 71 - CONFIG_SYSCTL_SYSCALL_CHECK=y 72 71 CONFIG_KALLSYMS=y 73 72 # CONFIG_KALLSYMS_ALL is not set 74 73 # CONFIG_KALLSYMS_EXTRA_PASS is not set ··· 92 93 # CONFIG_MARKERS is not set 93 94 CONFIG_HAVE_OPROFILE=y 94 95 CONFIG_KPROBES=y 96 + # CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set 95 97 CONFIG_KRETPROBES=y 98 + # CONFIG_HAVE_IOREMAP_PROT is not set 96 99 CONFIG_HAVE_KPROBES=y 97 100 CONFIG_HAVE_KRETPROBES=y 101 + # CONFIG_HAVE_ARCH_TRACEHOOK is not set 98 102 # CONFIG_HAVE_DMA_ATTRS is not set 103 + # CONFIG_USE_GENERIC_SMP_HELPERS is not set 104 + # CONFIG_HAVE_CLK is not set 99 105 CONFIG_PROC_PAGE_MONITOR=y 106 + # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 100 107 CONFIG_SLABINFO=y 101 108 CONFIG_RT_MUTEXES=y 102 109 # CONFIG_TINY_SHMEM is not set ··· 118 113 CONFIG_BLOCK=y 119 114 # CONFIG_BLK_DEV_IO_TRACE is not set 120 115 CONFIG_BLK_DEV_BSG=y 116 + # CONFIG_BLK_DEV_INTEGRITY is not set 121 117 CONFIG_BLOCK_COMPAT=y 122 118 123 119 # ··· 181 175 CONFIG_ARCH_SPARSEMEM_ENABLE=y 182 176 CONFIG_ARCH_SPARSEMEM_DEFAULT=y 183 177 CONFIG_ARCH_SELECT_MEMORY_MODEL=y 178 + CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 179 + CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y 184 180 CONFIG_SELECT_MEMORY_MODEL=y 185 181 # CONFIG_FLATMEM_MANUAL is not set 186 182 # CONFIG_DISCONTIGMEM_MANUAL is not set ··· 193 185 CONFIG_SPARSEMEM_EXTREME=y 194 186 CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y 195 187 CONFIG_SPARSEMEM_VMEMMAP=y 188 + CONFIG_MEMORY_HOTPLUG=y 189 + CONFIG_MEMORY_HOTPLUG_SPARSE=y 190 + CONFIG_MEMORY_HOTREMOVE=y 196 191 CONFIG_PAGEFLAGS_EXTENDED=y 197 192 CONFIG_SPLIT_PTLOCK_CPUS=4 193 + CONFIG_MIGRATION=y 198 194 CONFIG_RESOURCES_64BIT=y 199 195 CONFIG_ZONE_DMA_FLAG=1 200 196 CONFIG_BOUNCE=y ··· 210 198 CONFIG_MACHCHK_WARNING=y 211 199 CONFIG_QDIO=y 212 200 # CONFIG_QDIO_DEBUG is not set 201 + CONFIG_CHSC_SCH=m 213 202 214 203 # 215 204 # Misc ··· 219 206 # CONFIG_IPL_TAPE is not set 220 207 CONFIG_IPL_VM=y 221 208 CONFIG_BINFMT_ELF=y 209 + CONFIG_COMPAT_BINFMT_ELF=y 222 210 CONFIG_BINFMT_MISC=m 223 211 CONFIG_FORCE_MAX_ZONEORDER=9 224 212 # CONFIG_PROCESS_DEBUG is not set ··· 240 226 CONFIG_KEXEC=y 241 227 # CONFIG_ZFCPDUMP is not set 242 228 CONFIG_S390_GUEST=y 243 - 244 - # 245 - # Networking 246 - # 247 229 CONFIG_NET=y 248 230 249 231 # ··· 374 364 # CONFIG_NET_SCH_HTB is not set 375 365 # CONFIG_NET_SCH_HFSC is not set 376 366 CONFIG_NET_SCH_PRIO=m 377 - CONFIG_NET_SCH_RR=m 378 367 CONFIG_NET_SCH_RED=m 379 368 CONFIG_NET_SCH_SFQ=m 380 369 CONFIG_NET_SCH_TEQL=m ··· 439 430 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 440 431 CONFIG_STANDALONE=y 441 432 CONFIG_PREVENT_FIRMWARE_BUILD=y 442 - # CONFIG_FW_LOADER is not set 433 + CONFIG_FW_LOADER=y 434 + # CONFIG_FIRMWARE_IN_KERNEL is not set 435 + CONFIG_EXTRA_FIRMWARE="" 443 436 # CONFIG_DEBUG_DRIVER is not set 444 437 # CONFIG_DEBUG_DEVRES is not set 445 438 CONFIG_SYS_HYPERVISOR=y ··· 518 507 # CONFIG_ISCSI_TCP is not set 519 508 # CONFIG_SCSI_DEBUG is not set 520 509 CONFIG_ZFCP=y 510 + CONFIG_SCSI_DH=m 511 + CONFIG_SCSI_DH_RDAC=m 512 + CONFIG_SCSI_DH_HP_SW=m 513 + CONFIG_SCSI_DH_EMC=m 514 + CONFIG_SCSI_DH_ALUA=m 521 515 CONFIG_MD=y 522 516 CONFIG_BLK_DEV_MD=y 523 517 CONFIG_MD_LINEAR=m ··· 538 522 CONFIG_DM_SNAPSHOT=y 
539 523 CONFIG_DM_MIRROR=y 540 524 CONFIG_DM_ZERO=y 541 - CONFIG_DM_MULTIPATH=y 542 - # CONFIG_DM_MULTIPATH_EMC is not set 543 - # CONFIG_DM_MULTIPATH_RDAC is not set 544 - # CONFIG_DM_MULTIPATH_HP is not set 525 + CONFIG_DM_MULTIPATH=m 545 526 # CONFIG_DM_DELAY is not set 546 527 # CONFIG_DM_UEVENT is not set 547 528 CONFIG_NETDEVICES=y 548 - # CONFIG_NETDEVICES_MULTIQUEUE is not set 549 529 # CONFIG_IFB is not set 550 530 CONFIG_DUMMY=m 551 531 CONFIG_BONDING=m ··· 556 544 # CONFIG_IBM_NEW_EMAC_TAH is not set 557 545 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set 558 546 CONFIG_NETDEV_1000=y 559 - # CONFIG_E1000E_ENABLED is not set 560 547 CONFIG_NETDEV_10000=y 561 548 # CONFIG_TR is not set 562 549 # CONFIG_WAN is not set ··· 587 576 CONFIG_UNIX98_PTYS=y 588 577 CONFIG_LEGACY_PTYS=y 589 578 CONFIG_LEGACY_PTY_COUNT=256 579 + CONFIG_HVC_DRIVER=y 580 + CONFIG_VIRTIO_CONSOLE=y 590 581 CONFIG_HW_RANDOM=m 582 + CONFIG_HW_RANDOM_VIRTIO=m 591 583 # CONFIG_R3964 is not set 592 584 CONFIG_RAW_DRIVER=m 593 585 CONFIG_MAX_RAW_DEVS=256 ··· 630 616 CONFIG_S390_VMUR=m 631 617 # CONFIG_POWER_SUPPLY is not set 632 618 # CONFIG_THERMAL is not set 619 + # CONFIG_THERMAL_HWMON is not set 633 620 # CONFIG_WATCHDOG is not set 634 621 635 622 # ··· 708 693 # CONFIG_CRAMFS is not set 709 694 # CONFIG_VXFS_FS is not set 710 695 # CONFIG_MINIX_FS is not set 696 + # CONFIG_OMFS_FS is not set 711 697 # CONFIG_HPFS_FS is not set 712 698 # CONFIG_QNX4FS_FS is not set 713 699 # CONFIG_ROMFS_FS is not set ··· 728 712 CONFIG_EXPORTFS=y 729 713 CONFIG_NFS_COMMON=y 730 714 CONFIG_SUNRPC=y 731 - # CONFIG_SUNRPC_BIND34 is not set 732 715 # CONFIG_RPCSEC_GSS_KRB5 is not set 733 716 # CONFIG_RPCSEC_GSS_SPKM3 is not set 734 717 # CONFIG_SMB_FS is not set ··· 795 780 # CONFIG_DEBUG_INFO is not set 796 781 # CONFIG_DEBUG_VM is not set 797 782 # CONFIG_DEBUG_WRITECOUNT is not set 783 + CONFIG_DEBUG_MEMORY_INIT=y 798 784 # CONFIG_DEBUG_LIST is not set 799 785 # CONFIG_DEBUG_SG is not set 800 786 # CONFIG_FRAME_POINTER is not set ··· 805 789 # CONFIG_LKDTM is not set 806 790 # CONFIG_FAULT_INJECTION is not set 807 791 # CONFIG_LATENCYTOP is not set 792 + CONFIG_SYSCTL_SYSCALL_CHECK=y 808 793 CONFIG_SAMPLES=y 809 794 # CONFIG_SAMPLE_KOBJECT is not set 810 795 # CONFIG_SAMPLE_KPROBES is not set ··· 864 847 # CONFIG_CRYPTO_MD4 is not set 865 848 CONFIG_CRYPTO_MD5=m 866 849 # CONFIG_CRYPTO_MICHAEL_MIC is not set 850 + CONFIG_CRYPTO_RMD128=m 851 + CONFIG_CRYPTO_RMD160=m 852 + CONFIG_CRYPTO_RMD256=m 853 + CONFIG_CRYPTO_RMD320=m 867 854 CONFIG_CRYPTO_SHA1=m 868 855 # CONFIG_CRYPTO_SHA256 is not set 869 856 # CONFIG_CRYPTO_SHA512 is not set ··· 916 895 # CONFIG_GENERIC_FIND_NEXT_BIT is not set 917 896 # CONFIG_CRC_CCITT is not set 918 897 # CONFIG_CRC16 is not set 898 + CONFIG_CRC_T10DIF=y 919 899 # CONFIG_CRC_ITU_T is not set 920 900 CONFIG_CRC32=m 921 901 CONFIG_CRC7=m
+3 -3
arch/s390/include/asm/bitops.h
··· 709 709 * __ffz_word returns __BITOPS_WORDSIZE 710 710 * if no zero bit is present in the word. 711 711 */ 712 - set = __ffz_word(0, *p >> bit) + bit; 712 + set = __ffz_word(bit, *p >> bit); 713 713 if (set >= size) 714 714 return size + offset; 715 715 if (set < __BITOPS_WORDSIZE) ··· 824 824 * s390 version of ffz returns __BITOPS_WORDSIZE 825 825 * if no zero bit is present in the word. 826 826 */ 827 - set = ffz(__load_ulong_le(p, 0) >> bit) + bit; 827 + set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); 828 828 if (set >= size) 829 829 return size + offset; 830 830 if (set < __BITOPS_WORDSIZE) ··· 865 865 * s390 version of ffz returns __BITOPS_WORDSIZE 866 866 * if no zero bit is present in the word. 867 867 */ 868 - set = ffs(__load_ulong_le(p, 0) >> bit) + bit; 868 + set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); 869 869 if (set >= size) 870 870 return size + offset; 871 871 if (set < __BITOPS_WORDSIZE)
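
The replacements above switch the search paths from the generic ffz()/ffs() helpers to the s390 __ffz_word()/__ffs_word() ones, whose "word size when nothing is found" convention is what the surrounding "set >= size" check expects. An illustrative user-space helper with that convention (not the kernel's implementation):

/*
 * Return "offset + bit position" on success and "offset + WORDSIZE"
 * when the word holds no set bit.  Contrast with ffs(), which is
 * 1-based and returns 0 for an empty word.
 */
#include <stdio.h>
#include <strings.h>

#define WORDSIZE 64

static unsigned long ffs_word(unsigned long nr, unsigned long word)
{
    unsigned long bit;

    if (!word)
        return nr + WORDSIZE;         /* nothing found in this word */
    for (bit = 0; !(word & 1UL); bit++)
        word >>= 1;
    return nr + bit;
}

int main(void)
{
    printf("ffs_word(3, 0x10) = %lu (bit 4 of the word, offset 3)\n",
           ffs_word(3, 0x10));
    printf("ffs_word(3, 0)    = %lu (nothing found: 3 + 64)\n",
           ffs_word(3, 0));
    printf("plain ffs(0)      = %d (ambiguous for the loop)\n", ffs(0));
    return 0;
}
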
+3 -1
arch/s390/kernel/process.c
··· 75 75 return sf->gprs[8]; 76 76 } 77 77 78 - DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 78 + DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = { 79 + .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock) 80 + }; 79 81 80 82 static int s390_idle_enter(void) 81 83 {
-2
arch/s390/kernel/smp.c
··· 610 610 if (IS_ERR(p)) 611 611 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 612 612 current_set[cpu] = p; 613 - spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock); 614 613 } 615 614 616 615 static int __cpuinit smp_alloc_lowcore(int cpu) ··· 844 845 current_set[0] = current; 845 846 smp_cpu_state[0] = CPU_STATE_CONFIGURED; 846 847 smp_cpu_polarization[0] = POLARIZATION_UNKNWN; 847 - spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); 848 848 } 849 849 850 850 void __init smp_cpus_done(unsigned int max_cpus)
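
With s390_idle now statically initialised at its definition in process.c above, the runtime spin_lock_init() calls here can be dropped. A user-space analogy using pthreads rather than the kernel spinlock API:

/*
 * A static initialiser at the definition removes the need to call an
 * init function from every code path that might touch the lock first.
 */
#include <pthread.h>
#include <stdio.h>

struct idle_data {
    pthread_mutex_t lock;
    unsigned long idle_count;
};

/* statically initialised: usable before any explicit setup runs */
static struct idle_data idle = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .idle_count = 0,
};

int main(void)
{
    pthread_mutex_lock(&idle.lock);
    idle.idle_count++;
    pthread_mutex_unlock(&idle.lock);
    printf("idle_count=%lu\n", idle.idle_count);
    return 0;
}
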
+4
arch/sh/boards/board-ap325rxa.c
··· 140 140 .vsync_len = 1, 141 141 .sync = 0, /* hsync and vsync are active low */ 142 142 }, 143 + .lcd_size_cfg = { /* 7.0 inch */ 144 + .width = 152, 145 + .height = 91, 146 + }, 143 147 .board_cfg = { 144 148 .display_on = ap320_wvga_power_on, 145 149 },
+8
arch/sh/boards/mach-migor/setup.c
··· 224 224 .vsync_len = 2, 225 225 .sync = 0, 226 226 }, 227 + .lcd_size_cfg = { /* 7.0 inch */ 228 + .width = 152, 229 + .height = 91, 230 + }, 227 231 } 228 232 #endif 229 233 #ifdef CONFIG_SH_MIGOR_QVGA ··· 248 244 .lower_margin = 17, 249 245 .vsync_len = 2, 250 246 .sync = FB_SYNC_HOR_HIGH_ACT, 247 + }, 248 + .lcd_size_cfg = { /* 2.4 inch */ 249 + .width = 49, 250 + .height = 37, 251 251 }, 252 252 .board_cfg = { 253 253 .setup_sys = migor_lcd_qvga_setup,
+82 -2
arch/sh/boards/mach-sh7763rdp/setup.c
··· 15 15 #include <linux/interrupt.h> 16 16 #include <linux/input.h> 17 17 #include <linux/mtd/physmap.h> 18 - #include <asm/io.h> 18 + #include <linux/fb.h> 19 + #include <linux/io.h> 19 20 #include <asm/sh7763rdp.h> 21 + #include <asm/sh_eth.h> 22 + #include <asm/sh7760fb.h> 20 23 21 24 /* NOR Flash */ 22 25 static struct mtd_partition sh7763rdp_nor_flash_partitions[] = { ··· 63 60 }, 64 61 }; 65 62 63 + /* SH-Ether */ 64 + static struct resource sh_eth_resources[] = { 65 + { 66 + .start = 0xFEE00800, /* use eth1 */ 67 + .end = 0xFEE00F7C - 1, 68 + .flags = IORESOURCE_MEM, 69 + }, { 70 + .start = 58, /* irq number */ 71 + .end = 58, 72 + .flags = IORESOURCE_IRQ, 73 + }, 74 + }; 75 + 76 + static struct sh_eth_plat_data sh7763_eth_pdata = { 77 + .phy = 1, 78 + .edmac_endian = EDMAC_LITTLE_ENDIAN, 79 + }; 80 + 81 + static struct platform_device sh7763rdp_eth_device = { 82 + .name = "sh-eth", 83 + .resource = sh_eth_resources, 84 + .num_resources = ARRAY_SIZE(sh_eth_resources), 85 + .dev = { 86 + .platform_data = &sh7763_eth_pdata, 87 + }, 88 + }; 89 + 90 + /* SH7763 LCDC */ 91 + static struct resource sh7763rdp_fb_resources[] = { 92 + { 93 + .start = 0xFFE80000, 94 + .end = 0xFFE80442 - 1, 95 + .flags = IORESOURCE_MEM, 96 + }, 97 + }; 98 + 99 + static struct fb_videomode sh7763fb_videomode = { 100 + .refresh = 60, 101 + .name = "VGA Monitor", 102 + .xres = 640, 103 + .yres = 480, 104 + .pixclock = 10000, 105 + .left_margin = 80, 106 + .right_margin = 24, 107 + .upper_margin = 30, 108 + .lower_margin = 1, 109 + .hsync_len = 96, 110 + .vsync_len = 1, 111 + .sync = 0, 112 + .vmode = FB_VMODE_NONINTERLACED, 113 + .flag = FBINFO_FLAG_DEFAULT, 114 + }; 115 + 116 + static struct sh7760fb_platdata sh7763fb_def_pdata = { 117 + .def_mode = &sh7763fb_videomode, 118 + .ldmtr = (LDMTR_TFT_COLOR_16|LDMTR_MCNT), 119 + .lddfr = LDDFR_16BPP_RGB565, 120 + .ldpmmr = 0x0000, 121 + .ldpspr = 0xFFFF, 122 + .ldaclnr = 0x0001, 123 + .ldickr = 0x1102, 124 + .rotate = 0, 125 + .novsync = 0, 126 + .blank = NULL, 127 + }; 128 + 129 + static struct platform_device sh7763rdp_fb_device = { 130 + .name = "sh7760-lcdc", 131 + .resource = sh7763rdp_fb_resources, 132 + .num_resources = ARRAY_SIZE(sh7763rdp_fb_resources), 133 + .dev = { 134 + .platform_data = &sh7763fb_def_pdata, 135 + }, 136 + }; 137 + 66 138 static struct platform_device *sh7763rdp_devices[] __initdata = { 67 139 &sh7763rdp_nor_flash_device, 140 + &sh7763rdp_eth_device, 141 + &sh7763rdp_fb_device, 68 142 }; 69 143 70 144 static int __init sh7763rdp_devices_setup(void) ··· 149 69 return platform_add_devices(sh7763rdp_devices, 150 70 ARRAY_SIZE(sh7763rdp_devices)); 151 71 } 152 - __initcall(sh7763rdp_devices_setup); 72 + device_initcall(sh7763rdp_devices_setup); 153 73 154 74 static void __init sh7763rdp_setup(char **cmdline_p) 155 75 {
+9 -1
arch/sh/boards/mach-x3proto/setup.c
··· 3 3 * 4 4 * Renesas SH-X3 Prototype Board Support. 5 5 * 6 - * Copyright (C) 2007 Paul Mundt 6 + * Copyright (C) 2007 - 2008 Paul Mundt 7 7 * 8 8 * This file is subject to the terms and conditions of the GNU General Public 9 9 * License. See the file "COPYING" in the main directory of this archive ··· 13 13 #include <linux/platform_device.h> 14 14 #include <linux/kernel.h> 15 15 #include <linux/io.h> 16 + #include <linux/smc91x.h> 16 17 #include <asm/ilsel.h> 17 18 18 19 static struct resource heartbeat_resources[] = { ··· 29 28 .id = -1, 30 29 .num_resources = ARRAY_SIZE(heartbeat_resources), 31 30 .resource = heartbeat_resources, 31 + }; 32 + 33 + static struct smc91x_platdata smc91x_info = { 34 + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, 32 35 }; 33 36 34 37 static struct resource smc91x_resources[] = { ··· 52 47 .id = -1, 53 48 .resource = smc91x_resources, 54 49 .num_resources = ARRAY_SIZE(smc91x_resources), 50 + .dev = { 51 + .platform_data = &smc91x_info, 52 + }, 55 53 }; 56 54 57 55 static struct resource r8a66597_usb_host_resources[] = {
+96 -15
arch/sh/configs/sh7763rdp_defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.26-rc4 4 - # Fri Jun 6 12:20:17 2008 3 + # Linux kernel version: 2.6.27-rc2 4 + # Fri Aug 8 13:44:20 2008 5 5 # 6 6 CONFIG_SUPERH=y 7 7 CONFIG_SUPERH32=y 8 + CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" 8 9 CONFIG_RWSEM_GENERIC_SPINLOCK=y 9 10 CONFIG_GENERIC_BUG=y 10 11 CONFIG_GENERIC_FIND_NEXT_BIT=y 11 12 CONFIG_GENERIC_HWEIGHT=y 12 13 CONFIG_GENERIC_HARDIRQS=y 14 + CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 13 15 CONFIG_GENERIC_IRQ_PROBE=y 14 16 CONFIG_GENERIC_CALIBRATE_DELAY=y 15 17 CONFIG_GENERIC_TIME=y ··· 21 19 # CONFIG_ARCH_HAS_ILOG2_U32 is not set 22 20 # CONFIG_ARCH_HAS_ILOG2_U64 is not set 23 21 CONFIG_ARCH_NO_VIRT_TO_BUS=y 24 - CONFIG_ARCH_SUPPORTS_AOUT=y 25 22 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 26 23 27 24 # ··· 84 83 # CONFIG_MARKERS is not set 85 84 CONFIG_OPROFILE=y 86 85 CONFIG_HAVE_OPROFILE=y 86 + # CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set 87 + # CONFIG_HAVE_IOREMAP_PROT is not set 87 88 # CONFIG_HAVE_KPROBES is not set 88 89 # CONFIG_HAVE_KRETPROBES is not set 90 + # CONFIG_HAVE_ARCH_TRACEHOOK is not set 89 91 # CONFIG_HAVE_DMA_ATTRS is not set 92 + # CONFIG_USE_GENERIC_SMP_HELPERS is not set 93 + CONFIG_HAVE_CLK=y 90 94 CONFIG_PROC_PAGE_MONITOR=y 95 + CONFIG_HAVE_GENERIC_DMA_COHERENT=y 91 96 CONFIG_SLABINFO=y 92 97 CONFIG_RT_MUTEXES=y 93 98 # CONFIG_TINY_SHMEM is not set ··· 103 96 # CONFIG_MODULE_UNLOAD is not set 104 97 # CONFIG_MODVERSIONS is not set 105 98 # CONFIG_MODULE_SRCVERSION_ALL is not set 106 - # CONFIG_KMOD is not set 99 + CONFIG_KMOD=y 107 100 CONFIG_BLOCK=y 108 101 # CONFIG_LBD is not set 109 102 # CONFIG_BLK_DEV_IO_TRACE is not set 110 103 # CONFIG_LSF is not set 111 104 # CONFIG_BLK_DEV_BSG is not set 105 + # CONFIG_BLK_DEV_INTEGRITY is not set 112 106 113 107 # 114 108 # IO Schedulers ··· 185 177 # CONFIG_PAGE_SIZE_8KB is not set 186 178 # CONFIG_PAGE_SIZE_16KB is not set 187 179 # CONFIG_PAGE_SIZE_64KB is not set 180 + CONFIG_ENTRY_OFFSET=0x00001000 188 181 CONFIG_SELECT_MEMORY_MODEL=y 189 182 # CONFIG_FLATMEM_MANUAL is not set 190 183 # CONFIG_DISCONTIGMEM_MANUAL is not set ··· 267 258 # CONFIG_SCHED_HRTICK is not set 268 259 # CONFIG_KEXEC is not set 269 260 # CONFIG_CRASH_DUMP is not set 261 + CONFIG_SECCOMP=y 270 262 CONFIG_PREEMPT_NONE=y 271 263 # CONFIG_PREEMPT_VOLUNTARY is not set 272 264 # CONFIG_PREEMPT is not set ··· 292 282 # 293 283 CONFIG_BINFMT_ELF=y 294 284 # CONFIG_BINFMT_MISC is not set 295 - 296 - # 297 - # Networking 298 - # 299 285 CONFIG_NET=y 300 286 301 287 # ··· 367 361 # 368 362 # CONFIG_CFG80211 is not set 369 363 CONFIG_WIRELESS_EXT=y 364 + CONFIG_WIRELESS_EXT_SYSFS=y 370 365 # CONFIG_MAC80211 is not set 371 366 # CONFIG_IEEE80211 is not set 372 367 # CONFIG_RFKILL is not set ··· 384 377 CONFIG_STANDALONE=y 385 378 CONFIG_PREVENT_FIRMWARE_BUILD=y 386 379 CONFIG_FW_LOADER=y 380 + CONFIG_FIRMWARE_IN_KERNEL=y 381 + CONFIG_EXTRA_FIRMWARE="" 387 382 # CONFIG_SYS_HYPERVISOR is not set 388 383 # CONFIG_CONNECTOR is not set 389 384 CONFIG_MTD=y ··· 480 471 # CONFIG_BLK_DEV_RAM is not set 481 472 # CONFIG_CDROM_PKTCDVD is not set 482 473 # CONFIG_ATA_OVER_ETH is not set 474 + # CONFIG_BLK_DEV_HD is not set 483 475 # CONFIG_MISC_DEVICES is not set 484 476 CONFIG_HAVE_IDE=y 485 477 # CONFIG_IDE is not set ··· 525 515 CONFIG_SCSI_LOWLEVEL=y 526 516 # CONFIG_ISCSI_TCP is not set 527 517 # CONFIG_SCSI_DEBUG is not set 518 + # CONFIG_SCSI_DH is not set 528 519 # CONFIG_ATA is not set 529 520 # CONFIG_MD is 
not set 530 521 CONFIG_NETDEVICES=y 531 - # CONFIG_NETDEVICES_MULTIQUEUE is not set 532 522 # CONFIG_DUMMY is not set 533 523 # CONFIG_BONDING is not set 534 524 # CONFIG_MACVLAN is not set ··· 556 546 CONFIG_MII=y 557 547 # CONFIG_AX88796 is not set 558 548 # CONFIG_STNIC is not set 549 + CONFIG_SH_ETH=y 559 550 # CONFIG_SMC91X is not set 551 + # CONFIG_SMC911X is not set 560 552 # CONFIG_IBM_NEW_EMAC_ZMII is not set 561 553 # CONFIG_IBM_NEW_EMAC_RGMII is not set 562 554 # CONFIG_IBM_NEW_EMAC_TAH is not set ··· 625 613 # 626 614 # Character devices 627 615 # 628 - # CONFIG_VT is not set 616 + CONFIG_VT=y 617 + CONFIG_CONSOLE_TRANSLATIONS=y 618 + CONFIG_VT_CONSOLE=y 619 + CONFIG_HW_CONSOLE=y 620 + # CONFIG_VT_HW_CONSOLE_BINDING is not set 629 621 CONFIG_DEVKMEM=y 630 622 # CONFIG_SERIAL_NONSTANDARD is not set 631 623 ··· 660 644 # CONFIG_POWER_SUPPLY is not set 661 645 # CONFIG_HWMON is not set 662 646 # CONFIG_THERMAL is not set 647 + # CONFIG_THERMAL_HWMON is not set 663 648 # CONFIG_WATCHDOG is not set 664 649 665 650 # ··· 672 655 # 673 656 # Multifunction device drivers 674 657 # 658 + # CONFIG_MFD_CORE is not set 675 659 # CONFIG_MFD_SM501 is not set 676 660 # CONFIG_HTC_PASIC3 is not set 677 661 ··· 697 679 # 698 680 # CONFIG_VGASTATE is not set 699 681 # CONFIG_VIDEO_OUTPUT_CONTROL is not set 700 - # CONFIG_FB is not set 682 + CONFIG_FB=y 683 + # CONFIG_FIRMWARE_EDID is not set 684 + # CONFIG_FB_DDC is not set 685 + CONFIG_FB_CFB_FILLRECT=y 686 + CONFIG_FB_CFB_COPYAREA=y 687 + CONFIG_FB_CFB_IMAGEBLIT=y 688 + # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set 689 + # CONFIG_FB_SYS_FILLRECT is not set 690 + # CONFIG_FB_SYS_COPYAREA is not set 691 + # CONFIG_FB_SYS_IMAGEBLIT is not set 692 + CONFIG_FB_FOREIGN_ENDIAN=y 693 + CONFIG_FB_BOTH_ENDIAN=y 694 + # CONFIG_FB_BIG_ENDIAN is not set 695 + # CONFIG_FB_LITTLE_ENDIAN is not set 696 + # CONFIG_FB_SYS_FOPS is not set 697 + # CONFIG_FB_SVGALIB is not set 698 + # CONFIG_FB_MACMODES is not set 699 + # CONFIG_FB_BACKLIGHT is not set 700 + # CONFIG_FB_MODE_HELPERS is not set 701 + # CONFIG_FB_TILEBLITTING is not set 702 + 703 + # 704 + # Frame buffer hardware drivers 705 + # 706 + # CONFIG_FB_S1D13XXX is not set 707 + # CONFIG_FB_SH_MOBILE_LCDC is not set 708 + CONFIG_FB_SH7760=y 709 + # CONFIG_FB_VIRTUAL is not set 701 710 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set 702 711 703 712 # ··· 733 688 # CONFIG_DISPLAY_SUPPORT is not set 734 689 735 690 # 736 - # Sound 691 + # Console display driver support 737 692 # 693 + CONFIG_DUMMY_CONSOLE=y 694 + CONFIG_FRAMEBUFFER_CONSOLE=y 695 + # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set 696 + # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set 697 + # CONFIG_FONTS is not set 698 + CONFIG_FONT_8x8=y 699 + CONFIG_FONT_8x16=y 700 + CONFIG_LOGO=y 701 + CONFIG_LOGO_LINUX_MONO=y 702 + CONFIG_LOGO_LINUX_VGA16=y 703 + CONFIG_LOGO_LINUX_CLUT224=y 704 + CONFIG_LOGO_SUPERH_MONO=y 705 + CONFIG_LOGO_SUPERH_VGA16=y 706 + CONFIG_LOGO_SUPERH_CLUT224=y 738 707 # CONFIG_SOUND is not set 739 708 # CONFIG_HID_SUPPORT is not set 740 709 CONFIG_USB_SUPPORT=y ··· 847 788 # CONFIG_USB_IOWARRIOR is not set 848 789 # CONFIG_USB_ISIGHTFW is not set 849 790 # CONFIG_USB_GADGET is not set 850 - # CONFIG_MMC is not set 791 + CONFIG_MMC=y 792 + # CONFIG_MMC_DEBUG is not set 793 + # CONFIG_MMC_UNSAFE_RESUME is not set 794 + 795 + # 796 + # MMC/SD Card Drivers 797 + # 798 + CONFIG_MMC_BLOCK=y 799 + CONFIG_MMC_BLOCK_BOUNCE=y 800 + # CONFIG_SDIO_UART is not set 801 + # CONFIG_MMC_TEST is not set 802 + 803 + # 804 + # MMC/SD Host 
Controller Drivers 805 + # 806 + # CONFIG_MMC_SDHCI is not set 851 807 # CONFIG_MEMSTICK is not set 852 808 # CONFIG_NEW_LEDS is not set 853 809 # CONFIG_ACCESSIBILITY is not set 854 810 # CONFIG_RTC_CLASS is not set 811 + # CONFIG_DMADEVICES is not set 855 812 # CONFIG_UIO is not set 856 813 857 814 # ··· 940 865 # CONFIG_CRAMFS is not set 941 866 # CONFIG_VXFS_FS is not set 942 867 # CONFIG_MINIX_FS is not set 868 + # CONFIG_OMFS_FS is not set 943 869 # CONFIG_HPFS_FS is not set 944 870 # CONFIG_QNX4FS_FS is not set 945 871 # CONFIG_ROMFS_FS is not set ··· 950 874 CONFIG_NFS_FS=y 951 875 # CONFIG_NFS_V3 is not set 952 876 # CONFIG_NFS_V4 is not set 953 - # CONFIG_NFSD is not set 954 877 CONFIG_ROOT_NFS=y 878 + # CONFIG_NFSD is not set 955 879 CONFIG_LOCKD=y 956 880 CONFIG_NFS_COMMON=y 957 881 CONFIG_SUNRPC=y 958 - # CONFIG_SUNRPC_BIND34 is not set 959 882 # CONFIG_RPCSEC_GSS_KRB5 is not set 960 883 # CONFIG_RPCSEC_GSS_SPKM3 is not set 961 884 # CONFIG_SMB_FS is not set ··· 1024 949 # CONFIG_HEADERS_CHECK is not set 1025 950 # CONFIG_DEBUG_KERNEL is not set 1026 951 # CONFIG_DEBUG_BUGVERBOSE is not set 952 + # CONFIG_DEBUG_MEMORY_INIT is not set 1027 953 # CONFIG_SAMPLES is not set 1028 954 # CONFIG_SH_STANDARD_BIOS is not set 1029 955 # CONFIG_EARLY_SCIF_CONSOLE is not set ··· 1079 1003 # CONFIG_CRYPTO_MD4 is not set 1080 1004 # CONFIG_CRYPTO_MD5 is not set 1081 1005 # CONFIG_CRYPTO_MICHAEL_MIC is not set 1006 + # CONFIG_CRYPTO_RMD128 is not set 1007 + # CONFIG_CRYPTO_RMD160 is not set 1008 + # CONFIG_CRYPTO_RMD256 is not set 1009 + # CONFIG_CRYPTO_RMD320 is not set 1082 1010 # CONFIG_CRYPTO_SHA1 is not set 1083 1011 # CONFIG_CRYPTO_SHA256 is not set 1084 1012 # CONFIG_CRYPTO_SHA512 is not set ··· 1122 1042 # CONFIG_GENERIC_FIND_FIRST_BIT is not set 1123 1043 # CONFIG_CRC_CCITT is not set 1124 1044 # CONFIG_CRC16 is not set 1045 + CONFIG_CRC_T10DIF=y 1125 1046 # CONFIG_CRC_ITU_T is not set 1126 1047 CONFIG_CRC32=y 1127 1048 # CONFIG_CRC7 is not set
+7
arch/sh/include/asm/flat.h
··· 21 21 #define flat_get_relocate_addr(rel) (rel) 22 22 #define flat_set_persistent(relval, p) ({ (void)p; 0; }) 23 23 24 + #define FLAT_PLAT_INIT(_r) \ 25 + do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ 26 + _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ 27 + _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ 28 + _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \ 29 + _r->sr = SR_FD; } while (0) 30 + 24 31 #endif /* __ASM_SH_FLAT_H */
-3
arch/sh/include/asm/migor.h
··· 42 42 43 43 #define PORT_MSELCRB 0xa4050182 44 44 45 - #define MSTPCR1 0xa4150034 46 - #define MSTPCR2 0xa4150038 47 - 48 45 #define PORT_PSELA 0xa405014e 49 46 #define PORT_PSELB 0xa4050150 50 47 #define PORT_PSELC 0xa4050152
+6
arch/sh/include/asm/sh_mobile_lcdc.h
··· 47 47 void (*display_off)(void *board_data); 48 48 }; 49 49 50 + struct sh_mobile_lcdc_lcd_size_cfg { /* width and height of panel in mm */ 51 + unsigned long width; 52 + unsigned long height; 53 + }; 54 + 50 55 struct sh_mobile_lcdc_chan_cfg { 51 56 int chan; 52 57 int bpp; 53 58 int interface_type; /* selects RGBn or SYSn I/F, see above */ 54 59 int clock_divider; 55 60 struct fb_videomode lcd_cfg; 61 + struct sh_mobile_lcdc_lcd_size_cfg lcd_size_cfg; 56 62 struct sh_mobile_lcdc_board_cfg board_cfg; 57 63 struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */ 58 64 };
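
The new lcd_size_cfg carries the panel's physical dimensions in millimetres alongside the video mode, as the board files earlier in this series now fill in. One thing this enables is deriving the panel's DPI; a rough sketch, assuming the 7.0 inch 152x91 mm panel and a WVGA 800x480 mode (the mode values are an assumption, not taken from the hunks shown):

#include <stdio.h>

int main(void)
{
    unsigned long xres = 800, yres = 480;         /* assumed WVGA mode   */
    unsigned long width_mm = 152, height_mm = 91; /* from the board data */

    double dpi_x = xres * 25.4 / width_mm;        /* 25.4 mm per inch */
    double dpi_y = yres * 25.4 / height_mm;

    printf("panel: %lux%lu px, %lux%lu mm -> %.1f x %.1f dpi\n",
           xres, yres, width_mm, height_mm, dpi_x, dpi_y);
    return 0;
}
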
+10
arch/sh/include/cpu-sh3/cpu/cacheflush.h
··· 29 29 void flush_dcache_page(struct page *pg); 30 30 void flush_icache_range(unsigned long start, unsigned long end); 31 31 void flush_icache_page(struct vm_area_struct *vma, struct page *page); 32 + 33 + #define flush_dcache_mmap_lock(mapping) do { } while (0) 34 + #define flush_dcache_mmap_unlock(mapping) do { } while (0) 35 + 36 + /* SH3 has unified cache so no special action needed here */ 37 + #define flush_cache_sigtramp(vaddr) do { } while (0) 38 + #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) 39 + 40 + #define p3_cache_init() do { } while (0) 41 + 32 42 #else 33 43 #include <cpu-common/cpu/cacheflush.h> 34 44 #endif
+2 -2
arch/sh/kernel/cpu/sh4a/setup-sh7723.c
··· 45 45 }; 46 46 47 47 static struct uio_info veu0_platform_data = { 48 - .name = "VEU", 48 + .name = "VEU2H", 49 49 .version = "0", 50 50 .irq = 54, 51 51 }; ··· 73 73 }; 74 74 75 75 static struct uio_info veu1_platform_data = { 76 - .name = "VEU", 76 + .name = "VEU2H", 77 77 .version = "0", 78 78 .irq = 27, 79 79 };
+3 -1
arch/sh/kernel/sh_ksyms_32.c
··· 107 107 * GCC >= 4.2 emits these for division, as do GCC 4.1.x versions of the ST 108 108 * compiler which include backported patches. 109 109 */ 110 - DECLARE_EXPORT(__sdivsi3_i4i); 111 110 DECLARE_EXPORT(__udiv_qrnnd_16); 111 + #if !defined(CONFIG_CPU_SH2) 112 + DECLARE_EXPORT(__sdivsi3_i4i); 112 113 DECLARE_EXPORT(__udivsi3_i4i); 114 + #endif 113 115 #endif 114 116 #else /* GCC 3.x */ 115 117 DECLARE_EXPORT(__movstr_i4_even);
+2
arch/sh/mm/Kconfig
··· 181 181 choice 182 182 prompt "HugeTLB page size" 183 183 depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU 184 + default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB 184 185 default HUGETLB_PAGE_SIZE_64K 185 186 186 187 config HUGETLB_PAGE_SIZE_64K 187 188 bool "64kB" 189 + depends on !PAGE_SIZE_64KB 188 190 189 191 config HUGETLB_PAGE_SIZE_256K 190 192 bool "256kB"
+27
arch/sh/mm/consistent.c
··· 95 95 } 96 96 EXPORT_SYMBOL(dma_cache_sync); 97 97 98 + static int __init memchunk_setup(char *str) 99 + { 100 + return 1; /* accept anything that begins with "memchunk." */ 101 + } 102 + __setup("memchunk.", memchunk_setup); 103 + 104 + static void memchunk_cmdline_override(char *name, unsigned long *sizep) 105 + { 106 + char *p = boot_command_line; 107 + int k = strlen(name); 108 + 109 + while ((p = strstr(p, "memchunk."))) { 110 + p += 9; /* strlen("memchunk.") */ 111 + if (!strncmp(name, p, k) && p[k] == '=') { 112 + p += k + 1; 113 + *sizep = memparse(p, NULL); 114 + pr_info("%s: forcing memory chunk size to 0x%08lx\n", 115 + name, *sizep); 116 + break; 117 + } 118 + } 119 + } 120 + 98 121 int platform_resource_setup_memory(struct platform_device *pdev, 99 122 char *name, unsigned long memsize) 100 123 { ··· 131 108 name); 132 109 return -EINVAL; 133 110 } 111 + 112 + memchunk_cmdline_override(name, &memsize); 113 + if (!memsize) 114 + return 0; 134 115 135 116 buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL); 136 117 if (!buf) {
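
The new boot parameter accepts memchunk.<name>=<size> and overrides the platform's default per-device memory chunk, with memparse() handling the usual k/m suffixes; a size of 0 skips the allocation entirely. A user-space sketch of the same parsing, where parse_size() stands in for memparse() and the chunk name "veu0" is only an example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_size(const char *s)
{
    char *end;
    unsigned long v = strtoul(s, &end, 0);

    if (*end == 'k' || *end == 'K') v <<= 10;
    if (*end == 'm' || *end == 'M') v <<= 20;
    return v;
}

static void memchunk_override(const char *cmdline, const char *name,
                              unsigned long *sizep)
{
    const char *p = cmdline;
    size_t k = strlen(name);

    while ((p = strstr(p, "memchunk."))) {
        p += strlen("memchunk.");
        if (!strncmp(p, name, k) && p[k] == '=') {
            *sizep = parse_size(p + k + 1);
            printf("forcing %s chunk size to 0x%08lx\n", name, *sizep);
            return;
        }
    }
}

int main(void)
{
    unsigned long size = 1 << 20;                  /* platform default: 1 MiB */

    memchunk_override("console=ttySC0 memchunk.veu0=8m", "veu0", &size);
    printf("final size: 0x%08lx (0 would skip the allocation)\n", size);
    return 0;
}
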
+36 -73
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 737 737 #ifdef CONFIG_X86_POWERNOW_K8_ACPI 738 738 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) 739 739 { 740 - if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE)) 740 + if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) 741 741 return; 742 742 743 - data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK; 744 - data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK; 745 - data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; 746 - data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; 747 - data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK); 748 - data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK; 749 - } 750 - 751 - 752 - static struct acpi_processor_performance *acpi_perf_data; 753 - static int preregister_valid; 754 - 755 - static int powernow_k8_cpu_preinit_acpi(void) 756 - { 757 - acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 758 - if (!acpi_perf_data) 759 - return -ENODEV; 760 - 761 - if (acpi_processor_preregister_performance(acpi_perf_data)) 762 - return -ENODEV; 763 - else 764 - preregister_valid = 1; 765 - return 0; 743 + data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; 744 + data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; 745 + data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; 746 + data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; 747 + data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); 748 + data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; 766 749 } 767 750 768 751 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 769 752 { 770 753 struct cpufreq_frequency_table *powernow_table; 771 754 int ret_val; 772 - int cpu = 0; 773 755 774 - data->acpi_data = percpu_ptr(acpi_perf_data, cpu); 775 - if (acpi_processor_register_performance(data->acpi_data, data->cpu)) { 756 + if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 776 757 dprintk("register performance failed: bad ACPI data\n"); 777 758 return -EIO; 778 759 } 779 760 780 761 /* verify the data contained in the ACPI structures */ 781 - if (data->acpi_data->state_count <= 1) { 762 + if (data->acpi_data.state_count <= 1) { 782 763 dprintk("No ACPI P-States\n"); 783 764 goto err_out; 784 765 } 785 766 786 - if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || 787 - (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { 767 + if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || 768 + (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { 788 769 dprintk("Invalid control/status registers (%x - %x)\n", 789 - data->acpi_data->control_register.space_id, 790 - data->acpi_data->status_register.space_id); 770 + data->acpi_data.control_register.space_id, 771 + data->acpi_data.status_register.space_id); 791 772 goto err_out; 792 773 } 793 774 794 775 /* fill in data->powernow_table */ 795 776 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) 796 - * (data->acpi_data->state_count + 1)), GFP_KERNEL); 777 + * (data->acpi_data.state_count + 1)), GFP_KERNEL); 797 778 if (!powernow_table) { 798 779 
dprintk("powernow_table memory alloc failure\n"); 799 780 goto err_out; ··· 787 806 if (ret_val) 788 807 goto err_out_mem; 789 808 790 - powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END; 791 - powernow_table[data->acpi_data->state_count].index = 0; 809 + powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; 810 + powernow_table[data->acpi_data.state_count].index = 0; 792 811 data->powernow_table = powernow_table; 793 812 794 813 /* fill in data */ 795 - data->numps = data->acpi_data->state_count; 814 + data->numps = data->acpi_data.state_count; 796 815 if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) 797 816 print_basics(data); 798 817 powernow_k8_acpi_pst_values(data, 0); ··· 800 819 /* notify BIOS that we exist */ 801 820 acpi_processor_notify_smm(THIS_MODULE); 802 821 803 - /* determine affinity, from ACPI if available */ 804 - if (preregister_valid) { 805 - if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) || 806 - (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY)) 807 - data->starting_core_affinity = data->acpi_data->shared_cpu_map; 808 - else 809 - data->starting_core_affinity = cpumask_of_cpu(data->cpu); 810 - } else { 811 - /* best guess from family if not */ 812 - if (cpu_family == CPU_HW_PSTATE) 813 - data->starting_core_affinity = cpumask_of_cpu(data->cpu); 814 - else 815 - data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu); 816 - } 817 - 818 822 return 0; 819 823 820 824 err_out_mem: 821 825 kfree(powernow_table); 822 826 823 827 err_out: 824 - acpi_processor_unregister_performance(data->acpi_data, data->cpu); 828 + acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 825 829 826 830 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 827 - data->acpi_data->state_count = 0; 831 + data->acpi_data.state_count = 0; 828 832 829 833 return -ENODEV; 830 834 } ··· 821 855 rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); 822 856 data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; 823 857 824 - for (i = 0; i < data->acpi_data->state_count; i++) { 858 + for (i = 0; i < data->acpi_data.state_count; i++) { 825 859 u32 index; 826 860 827 - index = data->acpi_data->states[i].control & HW_PSTATE_MASK; 861 + index = data->acpi_data.states[i].control & HW_PSTATE_MASK; 828 862 if (index > data->max_hw_pstate) { 829 863 printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); 830 864 printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); ··· 840 874 841 875 powernow_table[i].index = index; 842 876 843 - powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000; 877 + powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; 844 878 } 845 879 return 0; 846 880 } ··· 849 883 { 850 884 int i; 851 885 int cntlofreq = 0; 852 - for (i = 0; i < data->acpi_data->state_count; i++) { 886 + for (i = 0; i < data->acpi_data.state_count; i++) { 853 887 u32 fid; 854 888 u32 vid; 855 889 856 890 if (data->exttype) { 857 - fid = data->acpi_data->states[i].status & EXT_FID_MASK; 858 - vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK; 891 + fid = data->acpi_data.states[i].status & EXT_FID_MASK; 892 + vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; 859 893 } else { 860 - fid = data->acpi_data->states[i].control & FID_MASK; 861 - vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK; 894 + fid = data->acpi_data.states[i].control & FID_MASK; 895 + vid = 
(data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; 862 896 } 863 897 864 898 dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); ··· 899 933 cntlofreq = i; 900 934 } 901 935 902 - if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) { 936 + if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { 903 937 printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", 904 938 powernow_table[i].frequency, 905 - (unsigned int) (data->acpi_data->states[i].core_frequency * 1000)); 939 + (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); 906 940 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; 907 941 continue; 908 942 } ··· 912 946 913 947 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) 914 948 { 915 - if (data->acpi_data->state_count) 916 - acpi_processor_unregister_performance(data->acpi_data, data->cpu); 949 + if (data->acpi_data.state_count) 950 + acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 917 951 } 918 952 919 953 #else 920 - static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; } 921 954 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } 922 955 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } 923 956 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } ··· 1101 1136 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) 1102 1137 { 1103 1138 struct powernow_k8_data *data; 1104 - cpumask_t oldmask = CPU_MASK_ALL; 1139 + cpumask_t oldmask; 1105 1140 int rc; 1106 1141 1107 1142 if (!cpu_online(pol->cpu)) ··· 1174 1209 /* run on any CPU again */ 1175 1210 set_cpus_allowed_ptr(current, &oldmask); 1176 1211 1177 - pol->cpus = data->starting_core_affinity; 1212 + if (cpu_family == CPU_HW_PSTATE) 1213 + pol->cpus = cpumask_of_cpu(pol->cpu); 1214 + else 1215 + pol->cpus = per_cpu(cpu_core_map, pol->cpu); 1178 1216 data->available_cores = &(pol->cpus); 1179 1217 1180 1218 /* Take a crude guess here. ··· 1300 1332 } 1301 1333 1302 1334 if (supported_cpus == num_online_cpus()) { 1303 - powernow_k8_cpu_preinit_acpi(); 1304 1335 printk(KERN_INFO PFX "Found %d %s " 1305 1336 "processors (%d cpu cores) (" VERSION ")\n", 1306 1337 num_online_nodes(), ··· 1316 1349 dprintk("exit\n"); 1317 1350 1318 1351 cpufreq_unregister_driver(&cpufreq_amd64_driver); 1319 - 1320 - #ifdef CONFIG_X86_POWERNOW_K8_ACPI 1321 - free_percpu(acpi_perf_data); 1322 - #endif 1323 1352 } 1324 1353 1325 1354 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
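
The rework above drops the per-CPU preregistration machinery and embeds struct acpi_processor_performance directly in powernow_k8_data again, turning every "acpi_data->" access back into "acpi_data.". Not the driver code, just a minimal illustration of what embedding by value buys:

/*
 * Embedding the performance data gives every CPU its own copy inside
 * its driver data: no separate allocation, no shared pointer.
 */
#include <stdio.h>

struct acpi_perf {                  /* stand-in for acpi_processor_performance */
    unsigned int state_count;
};

struct powernow_data {
    int cpu;
    struct acpi_perf acpi_data;     /* embedded: was "struct acpi_perf *" */
};

int main(void)
{
    struct powernow_data cpu0 = { .cpu = 0 };
    struct powernow_data cpu1 = { .cpu = 1 };

    cpu0.acpi_data.state_count = 5;     /* "." access, private per CPU */
    cpu1.acpi_data.state_count = 3;

    printf("cpu0 states=%u cpu1 states=%u\n",
           cpu0.acpi_data.state_count, cpu1.acpi_data.state_count);
    return 0;
}
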
+1 -2
arch/x86/kernel/cpu/cpufreq/powernow-k8.h
··· 33 33 #ifdef CONFIG_X86_POWERNOW_K8_ACPI 34 34 /* the acpi table needs to be kept. it's only available if ACPI was 35 35 * used to determine valid frequency/vid/fid states */ 36 - struct acpi_processor_performance *acpi_data; 36 + struct acpi_processor_performance acpi_data; 37 37 #endif 38 38 /* we need to keep track of associated cores, but let cpufreq 39 39 * handle hotplug events - so just point at cpufreq pol->cpus 40 40 * structure */ 41 41 cpumask_t *available_cores; 42 - cpumask_t starting_core_affinity; 43 42 }; 44 43 45 44
-1
arch/x86/mach-rdc321x/platform.c
··· 25 25 #include <linux/list.h> 26 26 #include <linux/device.h> 27 27 #include <linux/platform_device.h> 28 - #include <linux/version.h> 29 28 #include <linux/leds.h> 30 29 31 30 #include <asm/gpio.h>
+2
arch/x86/pci/irq.c
··· 590 590 case PCI_DEVICE_ID_INTEL_ICH10_1: 591 591 case PCI_DEVICE_ID_INTEL_ICH10_2: 592 592 case PCI_DEVICE_ID_INTEL_ICH10_3: 593 + case PCI_DEVICE_ID_INTEL_PCH_0: 594 + case PCI_DEVICE_ID_INTEL_PCH_1: 593 595 r->name = "PIIX/ICH"; 594 596 r->get = pirq_piix_get; 595 597 r->set = pirq_piix_set;
+1 -1
arch/x86/pci/legacy.c
··· 14 14 int n, devfn; 15 15 long node; 16 16 17 - if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) 17 + if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff) 18 18 return; 19 19 DBG("PCI: Peer bridge fixup\n"); 20 20
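
The one-liner above fixes an off-by-one: bus numbers are 8-bit, so 0xff is the last valid bus and should pass the range check. Trivially:

#include <stdio.h>

static int accept_old(int last_bus) { return !(last_bus <= 0 || last_bus >= 0xff); }
static int accept_new(int last_bus) { return !(last_bus <= 0 || last_bus >  0xff); }

int main(void)
{
    printf("bus 0xff  old=%d new=%d\n", accept_old(0xff),  accept_new(0xff));
    printf("bus 0x100 old=%d new=%d\n", accept_old(0x100), accept_new(0x100));
    return 0;
}
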
+48 -17
arch/x86/pci/mmconfig-shared.c
··· 293 293 return AE_OK; 294 294 } 295 295 296 - static int __init is_acpi_reserved(unsigned long start, unsigned long end) 296 + static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used) 297 297 { 298 298 struct resource mcfg_res; 299 299 ··· 310 310 return mcfg_res.flags; 311 311 } 312 312 313 + typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); 314 + 315 + static int __init is_mmconf_reserved(check_reserved_t is_reserved, 316 + u64 addr, u64 size, int i, 317 + typeof(pci_mmcfg_config[0]) *cfg, int with_e820) 318 + { 319 + u64 old_size = size; 320 + int valid = 0; 321 + 322 + while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) { 323 + size >>= 1; 324 + if (size < (16UL<<20)) 325 + break; 326 + } 327 + 328 + if (size >= (16UL<<20) || size == old_size) { 329 + printk(KERN_NOTICE 330 + "PCI: MCFG area at %Lx reserved in %s\n", 331 + addr, with_e820?"E820":"ACPI motherboard resources"); 332 + valid = 1; 333 + 334 + if (old_size != size) { 335 + /* update end_bus_number */ 336 + cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1); 337 + printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx " 338 + "segment %hu buses %u - %u\n", 339 + i, (unsigned long)cfg->address, cfg->pci_segment, 340 + (unsigned int)cfg->start_bus_number, 341 + (unsigned int)cfg->end_bus_number); 342 + } 343 + } 344 + 345 + return valid; 346 + } 347 + 313 348 static void __init pci_mmcfg_reject_broken(int early) 314 349 { 315 350 typeof(pci_mmcfg_config[0]) *cfg; ··· 359 324 360 325 for (i = 0; i < pci_mmcfg_config_num; i++) { 361 326 int valid = 0; 362 - u32 size = (cfg->end_bus_number + 1) << 20; 327 + u64 addr, size; 328 + 363 329 cfg = &pci_mmcfg_config[i]; 330 + addr = cfg->start_bus_number; 331 + addr <<= 20; 332 + addr += cfg->address; 333 + size = cfg->end_bus_number + 1 - cfg->start_bus_number; 334 + size <<= 20; 364 335 printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " 365 336 "segment %hu buses %u - %u\n", 366 337 i, (unsigned long)cfg->address, cfg->pci_segment, 367 338 (unsigned int)cfg->start_bus_number, 368 339 (unsigned int)cfg->end_bus_number); 369 340 370 - if (!early && 371 - is_acpi_reserved(cfg->address, cfg->address + size - 1)) { 372 - printk(KERN_NOTICE "PCI: MCFG area at %Lx reserved " 373 - "in ACPI motherboard resources\n", 374 - cfg->address); 375 - valid = 1; 376 - } 341 + if (!early) 342 + valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0); 377 343 378 344 if (valid) 379 345 continue; ··· 383 347 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" 384 348 " reserved in ACPI motherboard resources\n", 385 349 cfg->address); 350 + 386 351 /* Don't try to do this check unless configuration 387 352 type 1 is available. how about type 2 ?*/ 388 - if (raw_pci_ops && e820_all_mapped(cfg->address, 389 - cfg->address + size - 1, 390 - E820_RESERVED)) { 391 - printk(KERN_NOTICE 392 - "PCI: MCFG area at %Lx reserved in E820\n", 393 - cfg->address); 394 - valid = 1; 395 - } 353 + if (raw_pci_ops) 354 + valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1); 396 355 397 356 if (!valid) 398 357 goto reject;
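
is_mmconf_reserved() now keeps halving the advertised MMCONFIG window until the remainder is covered by an ACPI or E820 reservation (with a 16 MB floor), then trims end_bus_number to match. A user-space sketch of that shrink loop against a fake reservation predicate:

/*
 * is_reserved() here is a fake predicate for demonstration only; each
 * bus consumes 1 MB of MMCONFIG space, hence the bus-count arithmetic.
 */
#include <stdio.h>

#define MB (1ULL << 20)

static unsigned long long reserved_end = 64 * MB;   /* pretend E820 reserves 64 MB */

static int is_reserved(unsigned long long start, unsigned long long end)
{
    return start == 0 && end < reserved_end;
}

int main(void)
{
    unsigned long long addr = 0, size = 256 * MB, old_size = size;

    while (!is_reserved(addr, addr + size - 1)) {
        size >>= 1;
        if (size < 16 * MB)
            break;
    }

    if (size >= 16 * MB || size == old_size)
        printf("usable window: %llu MB -> buses 0x00-0x%02llx\n",
               size / MB, size / MB - 1);
    else
        printf("window rejected\n");
    return 0;
}
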
+24 -12
block/genhd.c
··· 293 293 /* iterator */ 294 294 static int find_start(struct device *dev, void *data) 295 295 { 296 - loff_t k = *(loff_t *)data; 296 + loff_t *k = data; 297 297 298 298 if (dev->type != &disk_type) 299 299 return 0; 300 - if (!k--) 300 + if (!*k) 301 301 return 1; 302 + (*k)--; 302 303 return 0; 303 304 } 304 305 305 306 static void *part_start(struct seq_file *part, loff_t *pos) 306 307 { 307 308 struct device *dev; 308 - loff_t n = *pos; 309 + loff_t k = *pos; 309 310 310 - if (!n) 311 + if (!k) 311 312 seq_puts(part, "major minor #blocks name\n\n"); 312 313 313 314 mutex_lock(&block_class_lock); 314 - dev = class_find_device(&block_class, NULL, (void *)pos, find_start); 315 - if (dev) 315 + dev = class_find_device(&block_class, NULL, &k, find_start); 316 + if (dev) { 317 + put_device(dev); 316 318 return dev_to_disk(dev); 319 + } 317 320 return NULL; 318 321 } 319 322 ··· 333 330 struct device *dev; 334 331 ++*pos; 335 332 dev = class_find_device(&block_class, &gp->dev, NULL, find_next); 336 - if (dev) 333 + if (dev) { 334 + put_device(dev); 337 335 return dev_to_disk(dev); 336 + } 338 337 return NULL; 339 338 } 340 339 ··· 573 568 static void *diskstats_start(struct seq_file *part, loff_t *pos) 574 569 { 575 570 struct device *dev; 571 + loff_t k = *pos; 576 572 577 573 mutex_lock(&block_class_lock); 578 - dev = class_find_device(&block_class, NULL, (void *)pos, find_start); 579 - if (dev) 574 + dev = class_find_device(&block_class, NULL, &k, find_start); 575 + if (dev) { 576 + put_device(dev); 580 577 return dev_to_disk(dev); 578 + } 581 579 return NULL; 582 580 } 583 581 ··· 591 583 592 584 ++*pos; 593 585 dev = class_find_device(&block_class, &gp->dev, NULL, find_next); 594 - if (dev) 586 + if (dev) { 587 + put_device(dev); 595 588 return dev_to_disk(dev); 589 + } 596 590 return NULL; 597 591 } 598 592 ··· 722 712 mutex_lock(&block_class_lock); 723 713 find.name = name; 724 714 find.part = part; 725 - dev = class_find_device(&block_class, NULL, (void *)&find, match_id); 726 - if (dev) 715 + dev = class_find_device(&block_class, NULL, &find, match_id); 716 + if (dev) { 717 + put_device(dev); 727 718 devt = MKDEV(MAJOR(dev->devt), 728 719 MINOR(dev->devt) + part); 720 + } 729 721 mutex_unlock(&block_class_lock); 730 722 731 723 return devt;
+6 -4
crypto/authenc.c
··· 174 174 static void crypto_authenc_encrypt_done(struct crypto_async_request *req, 175 175 int err) 176 176 { 177 + struct aead_request *areq = req->data; 178 + 177 179 if (!err) { 178 - struct aead_request *areq = req->data; 179 180 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 180 181 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 181 182 struct ablkcipher_request *abreq = aead_request_ctx(areq); ··· 186 185 err = crypto_authenc_genicv(areq, iv, 0); 187 186 } 188 187 189 - aead_request_complete(req->data, err); 188 + aead_request_complete(areq, err); 190 189 } 191 190 192 191 static int crypto_authenc_encrypt(struct aead_request *req) ··· 217 216 static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, 218 217 int err) 219 218 { 219 + struct aead_request *areq = req->data; 220 + 220 221 if (!err) { 221 - struct aead_request *areq = req->data; 222 222 struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); 223 223 224 224 err = crypto_authenc_genicv(areq, greq->giv, 0); 225 225 } 226 226 227 - aead_request_complete(req->data, err); 227 + aead_request_complete(areq, err); 228 228 } 229 229 230 230 static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
+5 -3
drivers/ata/ahci.c
··· 486 486 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ 487 487 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ 488 488 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ 489 + { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ 490 + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ 489 491 490 492 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 491 493 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, ··· 577 575 { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */ 578 576 579 577 /* SiS */ 580 - { PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */ 581 - { PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */ 582 - { PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */ 578 + { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 579 + { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */ 580 + { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ 583 581 584 582 /* Marvell */ 585 583 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
+8
drivers/ata/ata_piix.c
··· 275 275 { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 276 276 /* SATA Controller IDE (ICH10) */ 277 277 { 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 278 + /* SATA Controller IDE (PCH) */ 279 + { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 280 + /* SATA Controller IDE (PCH) */ 281 + { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 282 + /* SATA Controller IDE (PCH) */ 283 + { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 284 + /* SATA Controller IDE (PCH) */ 285 + { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 278 286 279 287 { } /* terminate list */ 280 288 };
+37 -23
drivers/ata/libata-core.c
··· 104 104 unsigned long xfer_mask; 105 105 unsigned int horkage_on; 106 106 unsigned int horkage_off; 107 + unsigned int lflags; 107 108 }; 108 109 109 110 struct ata_force_ent { ··· 197 196 } 198 197 199 198 /** 200 - * ata_force_spd_limit - force SATA spd limit according to libata.force 199 + * ata_force_link_limits - force link limits according to libata.force 201 200 * @link: ATA link of interest 202 201 * 203 - * Force SATA spd limit according to libata.force and whine about 204 - * it. When only the port part is specified (e.g. 1:), the limit 205 - * applies to all links connected to both the host link and all 206 - * fan-out ports connected via PMP. If the device part is 207 - * specified as 0 (e.g. 1.00:), it specifies the first fan-out 208 - * link not the host link. Device number 15 always points to the 209 - * host link whether PMP is attached or not. 202 + * Force link flags and SATA spd limit according to libata.force 203 + * and whine about it. When only the port part is specified 204 + * (e.g. 1:), the limit applies to all links connected to both 205 + * the host link and all fan-out ports connected via PMP. If the 206 + * device part is specified as 0 (e.g. 1.00:), it specifies the 207 + * first fan-out link not the host link. Device number 15 always 208 + * points to the host link whether PMP is attached or not. 210 209 * 211 210 * LOCKING: 212 211 * EH context. 213 212 */ 214 - static void ata_force_spd_limit(struct ata_link *link) 213 + static void ata_force_link_limits(struct ata_link *link) 215 214 { 215 + bool did_spd = false; 216 216 int linkno, i; 217 217 218 218 if (ata_is_host_link(link)) ··· 230 228 if (fe->device != -1 && fe->device != linkno) 231 229 continue; 232 230 233 - if (!fe->param.spd_limit) 234 - continue; 231 + /* only honor the first spd limit */ 232 + if (!did_spd && fe->param.spd_limit) { 233 + link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 234 + ata_link_printk(link, KERN_NOTICE, 235 + "FORCE: PHY spd limit set to %s\n", 236 + fe->param.name); 237 + did_spd = true; 238 + } 235 239 236 - link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 237 - ata_link_printk(link, KERN_NOTICE, 238 - "FORCE: PHY spd limit set to %s\n", fe->param.name); 239 - return; 240 + /* let lflags stack */ 241 + if (fe->param.lflags) { 242 + link->flags |= fe->param.lflags; 243 + ata_link_printk(link, KERN_NOTICE, 244 + "FORCE: link flag 0x%x forced -> 0x%x\n", 245 + fe->param.lflags, link->flags); 246 + } 240 247 } 241 248 } 242 249 ··· 3288 3277 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3289 3278 3290 3279 found = 1; 3291 - if (dev->dma_mode != 0xff) 3280 + if (ata_dma_enabled(dev)) 3292 3281 used_dma = 1; 3293 3282 } 3294 3283 if (!found) ··· 3313 3302 3314 3303 /* step 3: set host DMA timings */ 3315 3304 ata_link_for_each_dev(dev, link) { 3316 - if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff) 3305 + if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev)) 3317 3306 continue; 3318 3307 3319 3308 dev->xfer_mode = dev->dma_mode; ··· 5199 5188 */ 5200 5189 int sata_link_init_spd(struct ata_link *link) 5201 5190 { 5202 - u32 scontrol; 5203 5191 u8 spd; 5204 5192 int rc; 5205 5193 5206 - rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 5194 + rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5207 5195 if (rc) 5208 5196 return rc; 5209 5197 5210 - spd = (scontrol >> 4) & 0xf; 5198 + spd = (link->saved_scontrol >> 4) & 0xf; 5211 5199 if (spd) 5212 5200 link->hw_sata_spd_limit &= (1 << spd) - 1; 5213 5201 5214 - ata_force_spd_limit(link); 5202 + 
ata_force_link_limits(link); 5215 5203 5216 5204 link->sata_spd_limit = link->hw_sata_spd_limit; 5217 5205 ··· 5793 5783 ata_port_wait_eh(ap); 5794 5784 5795 5785 /* EH is now guaranteed to see UNLOADING - EH context belongs 5796 - * to us. Disable all existing devices. 5786 + * to us. Restore SControl and disable all existing devices. 5797 5787 */ 5798 - ata_port_for_each_link(link, ap) { 5788 + __ata_port_for_each_link(link, ap) { 5789 + sata_scr_write(link, SCR_CONTROL, link->saved_scontrol); 5799 5790 ata_link_for_each_dev(dev, link) 5800 5791 ata_dev_disable(dev); 5801 5792 } ··· 6002 5991 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6003 5992 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6004 5993 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 5994 + { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 5995 + { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 5996 + { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6005 5997 }; 6006 5998 char *start = *cur, *p = *cur; 6007 5999 char *id, *val, *endp;
+13 -17
drivers/ata/libata-eh.c
··· 2040 2040 } 2041 2041 2042 2042 if (ehc->i.serror) 2043 - ata_port_printk(ap, KERN_ERR, 2043 + ata_link_printk(link, KERN_ERR, 2044 2044 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 2045 2045 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 2046 2046 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", ··· 2171 2171 } 2172 2172 2173 2173 static int ata_eh_followup_srst_needed(struct ata_link *link, 2174 - int rc, int classify, 2175 - const unsigned int *classes) 2174 + int rc, const unsigned int *classes) 2176 2175 { 2177 2176 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2178 2177 return 0; 2179 - if (rc == -EAGAIN) { 2180 - if (classify) 2181 - return 1; 2182 - rc = 0; 2183 - } 2184 - if (rc != 0) 2185 - return 0; 2178 + if (rc == -EAGAIN) 2179 + return 1; 2186 2180 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 2187 2181 return 1; 2188 2182 return 0; ··· 2204 2210 */ 2205 2211 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2206 2212 max_tries++; 2213 + if (link->flags & ATA_LFLAG_NO_HRST) 2214 + hardreset = NULL; 2215 + if (link->flags & ATA_LFLAG_NO_SRST) 2216 + softreset = NULL; 2207 2217 2208 2218 now = jiffies; 2209 2219 deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); ··· 2245 2247 ehc->i.action &= ~ATA_EH_RESET; 2246 2248 if (hardreset) { 2247 2249 reset = hardreset; 2248 - ehc->i.action = ATA_EH_HARDRESET; 2250 + ehc->i.action |= ATA_EH_HARDRESET; 2249 2251 } else if (softreset) { 2250 2252 reset = softreset; 2251 - ehc->i.action = ATA_EH_SOFTRESET; 2253 + ehc->i.action |= ATA_EH_SOFTRESET; 2252 2254 } 2253 2255 2254 2256 if (prereset) { ··· 2303 2305 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2304 2306 2305 2307 rc = ata_do_reset(link, reset, classes, deadline); 2308 + if (rc && rc != -EAGAIN) 2309 + goto fail; 2306 2310 2307 2311 if (reset == hardreset && 2308 - ata_eh_followup_srst_needed(link, rc, classify, classes)) { 2312 + ata_eh_followup_srst_needed(link, rc, classes)) { 2309 2313 /* okay, let's do follow-up softreset */ 2310 2314 reset = softreset; 2311 2315 ··· 2322 2322 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2323 2323 rc = ata_do_reset(link, reset, classes, deadline); 2324 2324 } 2325 - 2326 - /* -EAGAIN can happen if we skipped followup SRST */ 2327 - if (rc && rc != -EAGAIN) 2328 - goto fail; 2329 2325 } else { 2330 2326 if (verbose) 2331 2327 ata_link_printk(link, KERN_INFO, "no reset method "
+1 -1
drivers/ata/pata_acpi.c
··· 181 181 182 182 if (adev != acpi->last) { 183 183 pacpi_set_piomode(ap, adev); 184 - if (adev->dma_mode) 184 + if (ata_dma_enabled(adev)) 185 185 pacpi_set_dmamode(ap, adev); 186 186 acpi->last = adev; 187 187 }
+1 -1
drivers/ata/pata_atiixp.c
··· 183 183 u16 tmp16; 184 184 185 185 pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); 186 - if (adev->dma_mode >= XFER_UDMA_0) 186 + if (ata_using_udma(adev)) 187 187 tmp16 |= (1 << dn); 188 188 else 189 189 tmp16 &= ~(1 << dn);
+3 -3
drivers/ata/pata_cs5530.c
··· 149 149 struct ata_device *prev = ap->private_data; 150 150 151 151 /* See if the DMA settings could be wrong */ 152 - if (adev->dma_mode != 0 && adev != prev && prev != NULL) { 152 + if (ata_dma_enabled(adev) && adev != prev && prev != NULL) { 153 153 /* Maybe, but do the channels match MWDMA/UDMA ? */ 154 - if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) || 155 - (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0)) 154 + if ((ata_using_udma(adev) && !ata_using_udma(prev)) || 155 + (ata_using_udma(prev) && !ata_using_udma(adev))) 156 156 /* Switch the mode bits */ 157 157 cs5530_set_dmamode(ap, adev); 158 158 }
+1 -1
drivers/ata/pata_it821x.c
··· 606 606 { 607 607 unsigned char id[41]; 608 608 int mode = 0; 609 - char *mtype; 609 + char *mtype = ""; 610 610 char mbuf[8]; 611 611 char *cbl = "(40 wire cable)"; 612 612
+1 -1
drivers/ata/pata_oldpiix.c
··· 198 198 199 199 if (adev != ap->private_data) { 200 200 oldpiix_set_piomode(ap, adev); 201 - if (adev->dma_mode) 201 + if (ata_dma_enabled(adev)) 202 202 oldpiix_set_dmamode(ap, adev); 203 203 } 204 204 return ata_sff_qc_issue(qc);
+3 -3
drivers/ata/pata_sc1200.c
··· 167 167 struct ata_device *prev = ap->private_data; 168 168 169 169 /* See if the DMA settings could be wrong */ 170 - if (adev->dma_mode != 0 && adev != prev && prev != NULL) { 170 + if (ata_dma_enabled(adev) && adev != prev && prev != NULL) { 171 171 /* Maybe, but do the channels match MWDMA/UDMA ? */ 172 - if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) || 173 - (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0)) 172 + if ((ata_using_udma(adev) && !ata_using_udma(prev)) || 173 + (ata_using_udma(prev) && !ata_using_udma(adev))) 174 174 /* Switch the mode bits */ 175 175 sc1200_set_dmamode(ap, adev); 176 176 }
+11 -48
drivers/ata/pata_via.c
··· 324 324 } 325 325 326 326 /** 327 - * via_ata_sff_tf_load - send taskfile registers to host controller 327 + * via_tf_load - send taskfile registers to host controller 328 328 * @ap: Port to which output is sent 329 329 * @tf: ATA taskfile register set 330 330 * 331 331 * Outputs ATA taskfile to standard ATA host controller. 332 332 * 333 333 * Note: This is to fix the internal bug of via chipsets, which 334 - * will reset the device register after changing the IEN bit on 335 - * ctl register 334 + * will reset the device register after changing the IEN bit on 335 + * ctl register 336 336 */ 337 - static void via_ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 337 + static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 338 338 { 339 - struct ata_ioports *ioaddr = &ap->ioaddr; 340 - unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 339 + struct ata_taskfile tmp_tf; 341 340 342 - if (tf->ctl != ap->last_ctl) { 343 - iowrite8(tf->ctl, ioaddr->ctl_addr); 344 - iowrite8(tf->device, ioaddr->device_addr); 345 - ap->last_ctl = tf->ctl; 346 - ata_wait_idle(ap); 341 + if (ap->ctl != ap->last_ctl && !(tf->flags & ATA_TFLAG_DEVICE)) { 342 + tmp_tf = *tf; 343 + tmp_tf.flags |= ATA_TFLAG_DEVICE; 344 + tf = &tmp_tf; 347 345 } 348 - 349 - if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 350 - iowrite8(tf->hob_feature, ioaddr->feature_addr); 351 - iowrite8(tf->hob_nsect, ioaddr->nsect_addr); 352 - iowrite8(tf->hob_lbal, ioaddr->lbal_addr); 353 - iowrite8(tf->hob_lbam, ioaddr->lbam_addr); 354 - iowrite8(tf->hob_lbah, ioaddr->lbah_addr); 355 - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", 356 - tf->hob_feature, 357 - tf->hob_nsect, 358 - tf->hob_lbal, 359 - tf->hob_lbam, 360 - tf->hob_lbah); 361 - } 362 - 363 - if (is_addr) { 364 - iowrite8(tf->feature, ioaddr->feature_addr); 365 - iowrite8(tf->nsect, ioaddr->nsect_addr); 366 - iowrite8(tf->lbal, ioaddr->lbal_addr); 367 - iowrite8(tf->lbam, ioaddr->lbam_addr); 368 - iowrite8(tf->lbah, ioaddr->lbah_addr); 369 - VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 370 - tf->feature, 371 - tf->nsect, 372 - tf->lbal, 373 - tf->lbam, 374 - tf->lbah); 375 - } 376 - 377 - if (tf->flags & ATA_TFLAG_DEVICE) { 378 - iowrite8(tf->device, ioaddr->device_addr); 379 - VPRINTK("device 0x%X\n", tf->device); 380 - } 381 - 382 - ata_wait_idle(ap); 346 + ata_sff_tf_load(ap, tf); 383 347 } 384 348 385 349 static struct scsi_host_template via_sht = { ··· 356 392 .set_piomode = via_set_piomode, 357 393 .set_dmamode = via_set_dmamode, 358 394 .prereset = via_pre_reset, 359 - .sff_tf_load = via_ata_tf_load, 395 + .sff_tf_load = via_tf_load, 360 396 }; 361 397 362 398 static struct ata_port_operations via_port_ops_noirq = { 363 399 .inherits = &via_port_ops, 364 400 .sff_data_xfer = ata_sff_data_xfer_noirq, 365 - .sff_tf_load = via_ata_tf_load, 366 401 }; 367 402 368 403 /**
+12 -25
drivers/ata/sata_mv.c
··· 1134 1134 if (ap->nr_active_links == 0) 1135 1135 return 0; 1136 1136 1137 - if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1138 - /* 1139 - * The port is operating in host queuing mode (EDMA). 1140 - * It can accomodate a new qc if the qc protocol 1141 - * is compatible with the current host queue mode. 1142 - */ 1143 - if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 1144 - /* 1145 - * The host queue (EDMA) is in NCQ mode. 1146 - * If the new qc is also an NCQ command, 1147 - * then allow the new qc. 1148 - */ 1149 - if (qc->tf.protocol == ATA_PROT_NCQ) 1150 - return 0; 1151 - } else { 1152 - /* 1153 - * The host queue (EDMA) is in non-NCQ, DMA mode. 1154 - * If the new qc is also a non-NCQ, DMA command, 1155 - * then allow the new qc. 1156 - */ 1157 - if (qc->tf.protocol == ATA_PROT_DMA) 1158 - return 0; 1159 - } 1160 - } 1137 + /* 1138 + * The port is operating in host queuing mode (EDMA) with NCQ 1139 + * enabled, allow multiple NCQ commands. EDMA also allows 1140 + * queueing multiple DMA commands but libata core currently 1141 + * doesn't allow it. 1142 + */ 1143 + if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && 1144 + (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol)) 1145 + return 0; 1146 + 1161 1147 return ATA_DEFER_PORT; 1162 1148 } 1163 1149 ··· 3022 3036 break; 3023 3037 case chip_soc: 3024 3038 hpriv->ops = &mv_soc_ops; 3025 - hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0; 3039 + hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | 3040 + MV_HP_ERRATA_60X1C0; 3026 3041 break; 3027 3042 3028 3043 default:
-1
drivers/atm/adummy.c
··· 3 3 */ 4 4 5 5 #include <linux/module.h> 6 - #include <linux/version.h> 7 6 #include <linux/kernel.h> 8 7 #include <linux/skbuff.h> 9 8 #include <linux/errno.h>
+11
drivers/base/class.c
··· 295 295 296 296 if (!class) 297 297 return -EINVAL; 298 + if (!class->p) { 299 + WARN(1, "%s called for class '%s' before it was initialized", 300 + __func__, class->name); 301 + return -EINVAL; 302 + } 303 + 298 304 mutex_lock(&class->p->class_mutex); 299 305 list_for_each_entry(dev, &class->p->class_devices, node) { 300 306 if (start) { ··· 350 344 351 345 if (!class) 352 346 return NULL; 347 + if (!class->p) { 348 + WARN(1, "%s called for class '%s' before it was initialized", 349 + __func__, class->name); 350 + return NULL; 351 + } 353 352 354 353 mutex_lock(&class->p->class_mutex); 355 354 list_for_each_entry(dev, &class->p->class_devices, node) {
+20 -11
drivers/base/core.c
··· 53 53 * it is attached to. If it is not attached to a bus either, an empty 54 54 * string will be returned. 55 55 */ 56 - const char *dev_driver_string(struct device *dev) 56 + const char *dev_driver_string(const struct device *dev) 57 57 { 58 58 return dev->driver ? dev->driver->name : 59 59 (dev->bus ? dev->bus->name : ··· 541 541 spin_lock_init(&dev->devres_lock); 542 542 INIT_LIST_HEAD(&dev->devres_head); 543 543 device_init_wakeup(dev, 0); 544 + device_pm_init(dev); 544 545 set_dev_node(dev, -1); 545 546 } 546 547 ··· 844 843 { 845 844 struct device *parent = NULL; 846 845 struct class_interface *class_intf; 847 - int error; 846 + int error = -EINVAL; 848 847 849 848 dev = get_device(dev); 850 - if (!dev || !strlen(dev->bus_id)) { 851 - error = -EINVAL; 852 - goto Done; 853 - } 849 + if (!dev) 850 + goto done; 851 + 852 + /* Temporarily support init_name if it is set. 853 + * It will override bus_id for now */ 854 + if (dev->init_name) 855 + dev_set_name(dev, "%s", dev->init_name); 856 + 857 + if (!strlen(dev->bus_id)) 858 + goto done; 854 859 855 860 pr_debug("device: '%s': %s\n", dev->bus_id, __func__); 856 861 ··· 904 897 error = bus_add_device(dev); 905 898 if (error) 906 899 goto BusError; 907 - error = device_pm_add(dev); 900 + error = dpm_sysfs_add(dev); 908 901 if (error) 909 - goto PMError; 902 + goto DPMError; 903 + device_pm_add(dev); 910 904 kobject_uevent(&dev->kobj, KOBJ_ADD); 911 905 bus_attach_device(dev); 912 906 if (parent) ··· 925 917 class_intf->add_dev(dev, class_intf); 926 918 mutex_unlock(&dev->class->p->class_mutex); 927 919 } 928 - Done: 920 + done: 929 921 put_device(dev); 930 922 return error; 931 - PMError: 923 + DPMError: 932 924 bus_remove_device(dev); 933 925 BusError: 934 926 if (dev->bus) ··· 952 944 cleanup_device_parent(dev); 953 945 if (parent) 954 946 put_device(parent); 955 - goto Done; 947 + goto done; 956 948 } 957 949 958 950 /** ··· 1015 1007 struct class_interface *class_intf; 1016 1008 1017 1009 device_pm_remove(dev); 1010 + dpm_sysfs_remove(dev); 1018 1011 if (parent) 1019 1012 klist_del(&dev->knode_parent); 1020 1013 if (MAJOR(dev->devt)) {
-3
drivers/base/driver.c
··· 16 16 #include <linux/string.h> 17 17 #include "base.h" 18 18 19 - #define to_dev(node) container_of(node, struct device, driver_list) 20 - 21 - 22 19 static struct device *next_device(struct klist_iter *i) 23 20 { 24 21 struct klist_node *n = klist_next(i);
+5 -14
drivers/base/power/main.c
··· 67 67 * device_pm_add - add a device to the list of active devices 68 68 * @dev: Device to be added to the list 69 69 */ 70 - int device_pm_add(struct device *dev) 70 + void device_pm_add(struct device *dev) 71 71 { 72 - int error; 73 - 74 72 pr_debug("PM: Adding info for %s:%s\n", 75 73 dev->bus ? dev->bus->name : "No Bus", 76 74 kobject_name(&dev->kobj)); 77 75 mutex_lock(&dpm_list_mtx); 78 76 if (dev->parent) { 79 - if (dev->parent->power.status >= DPM_SUSPENDING) { 80 - dev_warn(dev, "parent %s is sleeping, will not add\n", 77 + if (dev->parent->power.status >= DPM_SUSPENDING) 78 + dev_warn(dev, "parent %s should not be sleeping\n", 81 79 dev->parent->bus_id); 82 - WARN_ON(true); 83 - } 84 80 } else if (transition_started) { 85 81 /* 86 82 * We refuse to register parentless devices while a PM ··· 85 89 */ 86 90 WARN_ON(true); 87 91 } 88 - error = dpm_sysfs_add(dev); 89 - if (!error) { 90 - dev->power.status = DPM_ON; 91 - list_add_tail(&dev->power.entry, &dpm_list); 92 - } 92 + 93 + list_add_tail(&dev->power.entry, &dpm_list); 93 94 mutex_unlock(&dpm_list_mtx); 94 - return error; 95 95 } 96 96 97 97 /** ··· 102 110 dev->bus ? dev->bus->name : "No Bus", 103 111 kobject_name(&dev->kobj)); 104 112 mutex_lock(&dpm_list_mtx); 105 - dpm_sysfs_remove(dev); 106 113 list_del_init(&dev->power.entry); 107 114 mutex_unlock(&dpm_list_mtx); 108 115 }
+7 -2
drivers/base/power/power.h
··· 1 + static inline void device_pm_init(struct device *dev) 2 + { 3 + dev->power.status = DPM_ON; 4 + } 5 + 1 6 #ifdef CONFIG_PM_SLEEP 2 7 3 8 /* ··· 16 11 return container_of(entry, struct device, power.entry); 17 12 } 18 13 19 - extern int device_pm_add(struct device *); 14 + extern void device_pm_add(struct device *); 20 15 extern void device_pm_remove(struct device *); 21 16 22 17 #else /* CONFIG_PM_SLEEP */ 23 18 24 - static inline int device_pm_add(struct device *dev) { return 0; } 19 + static inline void device_pm_add(struct device *dev) {} 25 20 static inline void device_pm_remove(struct device *dev) {} 26 21 27 22 #endif
+1 -1
drivers/block/brd.c
··· 571 571 list_del(&brd->brd_list); 572 572 brd_free(brd); 573 573 } 574 + unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); 574 575 575 - unregister_blkdev(RAMDISK_MAJOR, "brd"); 576 576 return -ENOMEM; 577 577 } 578 578
+6 -4
drivers/block/nbd.c
··· 707 707 708 708 BUILD_BUG_ON(sizeof(struct nbd_request) != 28); 709 709 710 - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); 711 - if (!nbd_dev) 712 - return -ENOMEM; 713 - 714 710 if (max_part < 0) { 715 711 printk(KERN_CRIT "nbd: max_part must be >= 0\n"); 716 712 return -EINVAL; 717 713 } 714 + 715 + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); 716 + if (!nbd_dev) 717 + return -ENOMEM; 718 718 719 719 part_shift = 0; 720 720 if (max_part > 0) ··· 779 779 blk_cleanup_queue(nbd_dev[i].disk->queue); 780 780 put_disk(nbd_dev[i].disk); 781 781 } 782 + kfree(nbd_dev); 782 783 return err; 783 784 } 784 785 ··· 796 795 } 797 796 } 798 797 unregister_blkdev(NBD_MAJOR, "nbd"); 798 + kfree(nbd_dev); 799 799 printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); 800 800 } 801 801
+4 -6
drivers/bluetooth/Kconfig
··· 3 3 depends on BT 4 4 5 5 config BT_HCIUSB 6 - tristate "HCI USB driver" 7 - depends on USB 6 + tristate "HCI USB driver (old version)" 7 + depends on USB && BT_HCIBTUSB=n 8 8 help 9 9 Bluetooth HCI USB driver. 10 10 This driver is required if you want to use Bluetooth devices with ··· 23 23 Say Y here to compile support for SCO over HCI USB. 24 24 25 25 config BT_HCIBTUSB 26 - tristate "HCI USB driver (alternate version)" 27 - depends on USB && EXPERIMENTAL && BT_HCIUSB=n 26 + tristate "HCI USB driver" 27 + depends on USB 28 28 help 29 29 Bluetooth HCI USB driver. 30 30 This driver is required if you want to use Bluetooth devices with 31 31 USB interface. 32 - 33 - This driver is still experimental and has no SCO support. 34 32 35 33 Say Y here to compile support for Bluetooth USB devices into the 36 34 kernel or say M to compile it as module (btusb).
+1 -1
drivers/bluetooth/bt3c_cs.c
··· 60 60 /* ======================== Module parameters ======================== */ 61 61 62 62 63 - MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>, Jose Orlando Pereira <jop@di.uminho.pt>"); 63 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 64 64 MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card"); 65 65 MODULE_LICENSE("GPL"); 66 66 MODULE_FIRMWARE("BT3CPCC.bin");
+268 -14
drivers/bluetooth/btusb.c
··· 2 2 * 3 3 * Generic Bluetooth USB driver 4 4 * 5 - * Copyright (C) 2005-2007 Marcel Holtmann <marcel@holtmann.org> 5 + * Copyright (C) 2005-2008 Marcel Holtmann <marcel@holtmann.org> 6 6 * 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify ··· 41 41 #define BT_DBG(D...) 42 42 #endif 43 43 44 - #define VERSION "0.2" 44 + #define VERSION "0.3" 45 45 46 46 static int ignore_dga; 47 47 static int ignore_csr; ··· 160 160 { } /* Terminating entry */ 161 161 }; 162 162 163 + #define BTUSB_MAX_ISOC_FRAMES 10 164 + 163 165 #define BTUSB_INTR_RUNNING 0 164 166 #define BTUSB_BULK_RUNNING 1 167 + #define BTUSB_ISOC_RUNNING 2 165 168 166 169 struct btusb_data { 167 170 struct hci_dev *hdev; 168 171 struct usb_device *udev; 172 + struct usb_interface *isoc; 169 173 170 174 spinlock_t lock; 171 175 ··· 180 176 struct usb_anchor tx_anchor; 181 177 struct usb_anchor intr_anchor; 182 178 struct usb_anchor bulk_anchor; 179 + struct usb_anchor isoc_anchor; 183 180 184 181 struct usb_endpoint_descriptor *intr_ep; 185 182 struct usb_endpoint_descriptor *bulk_tx_ep; 186 183 struct usb_endpoint_descriptor *bulk_rx_ep; 184 + struct usb_endpoint_descriptor *isoc_tx_ep; 185 + struct usb_endpoint_descriptor *isoc_rx_ep; 186 + 187 + int isoc_altsetting; 187 188 }; 188 189 189 190 static void btusb_intr_complete(struct urb *urb) ··· 204 195 return; 205 196 206 197 if (urb->status == 0) { 198 + hdev->stat.byte_rx += urb->actual_length; 199 + 207 200 if (hci_recv_fragment(hdev, HCI_EVENT_PKT, 208 201 urb->transfer_buffer, 209 202 urb->actual_length) < 0) { ··· 227 216 } 228 217 } 229 218 230 - static inline int btusb_submit_intr_urb(struct hci_dev *hdev) 219 + static int btusb_submit_intr_urb(struct hci_dev *hdev) 231 220 { 232 221 struct btusb_data *data = hdev->driver_data; 233 222 struct urb *urb; ··· 236 225 int err, size; 237 226 238 227 BT_DBG("%s", hdev->name); 228 + 229 + if (!data->intr_ep) 230 + return -ENODEV; 239 231 240 232 urb = usb_alloc_urb(0, GFP_ATOMIC); 241 233 if (!urb) ··· 288 274 return; 289 275 290 276 if (urb->status == 0) { 277 + hdev->stat.byte_rx += urb->actual_length; 278 + 291 279 if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, 292 280 urb->transfer_buffer, 293 281 urb->actual_length) < 0) { ··· 311 295 } 312 296 } 313 297 314 - static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) 298 + static int btusb_submit_bulk_urb(struct hci_dev *hdev) 315 299 { 316 300 struct btusb_data *data = hdev->driver_data; 317 301 struct urb *urb; ··· 320 304 int err, size; 321 305 322 306 BT_DBG("%s", hdev->name); 307 + 308 + if (!data->bulk_rx_ep) 309 + return -ENODEV; 323 310 324 311 urb = usb_alloc_urb(0, GFP_KERNEL); 325 312 if (!urb) ··· 344 325 urb->transfer_flags |= URB_FREE_BUFFER; 345 326 346 327 usb_anchor_urb(urb, &data->bulk_anchor); 328 + 329 + err = usb_submit_urb(urb, GFP_KERNEL); 330 + if (err < 0) { 331 + BT_ERR("%s urb %p submission failed (%d)", 332 + hdev->name, urb, -err); 333 + usb_unanchor_urb(urb); 334 + kfree(buf); 335 + } 336 + 337 + usb_free_urb(urb); 338 + 339 + return err; 340 + } 341 + 342 + static void btusb_isoc_complete(struct urb *urb) 343 + { 344 + struct hci_dev *hdev = urb->context; 345 + struct btusb_data *data = hdev->driver_data; 346 + int i, err; 347 + 348 + BT_DBG("%s urb %p status %d count %d", hdev->name, 349 + urb, urb->status, urb->actual_length); 350 + 351 + if (!test_bit(HCI_RUNNING, &hdev->flags)) 352 + return; 353 + 354 + if (urb->status == 0) { 355 + for (i = 0; i < urb->number_of_packets; i++) { 356 + unsigned int offset 
= urb->iso_frame_desc[i].offset; 357 + unsigned int length = urb->iso_frame_desc[i].actual_length; 358 + 359 + if (urb->iso_frame_desc[i].status) 360 + continue; 361 + 362 + hdev->stat.byte_rx += length; 363 + 364 + if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, 365 + urb->transfer_buffer + offset, 366 + length) < 0) { 367 + BT_ERR("%s corrupted SCO packet", hdev->name); 368 + hdev->stat.err_rx++; 369 + } 370 + } 371 + } 372 + 373 + if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) 374 + return; 375 + 376 + usb_anchor_urb(urb, &data->isoc_anchor); 377 + 378 + err = usb_submit_urb(urb, GFP_ATOMIC); 379 + if (err < 0) { 380 + BT_ERR("%s urb %p failed to resubmit (%d)", 381 + hdev->name, urb, -err); 382 + usb_unanchor_urb(urb); 383 + } 384 + } 385 + 386 + static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu) 387 + { 388 + int i, offset = 0; 389 + 390 + BT_DBG("len %d mtu %d", len, mtu); 391 + 392 + for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; 393 + i++, offset += mtu, len -= mtu) { 394 + urb->iso_frame_desc[i].offset = offset; 395 + urb->iso_frame_desc[i].length = mtu; 396 + } 397 + 398 + if (len && i < BTUSB_MAX_ISOC_FRAMES) { 399 + urb->iso_frame_desc[i].offset = offset; 400 + urb->iso_frame_desc[i].length = len; 401 + i++; 402 + } 403 + 404 + urb->number_of_packets = i; 405 + } 406 + 407 + static int btusb_submit_isoc_urb(struct hci_dev *hdev) 408 + { 409 + struct btusb_data *data = hdev->driver_data; 410 + struct urb *urb; 411 + unsigned char *buf; 412 + unsigned int pipe; 413 + int err, size; 414 + 415 + BT_DBG("%s", hdev->name); 416 + 417 + if (!data->isoc_rx_ep) 418 + return -ENODEV; 419 + 420 + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL); 421 + if (!urb) 422 + return -ENOMEM; 423 + 424 + size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * 425 + BTUSB_MAX_ISOC_FRAMES; 426 + 427 + buf = kmalloc(size, GFP_KERNEL); 428 + if (!buf) { 429 + usb_free_urb(urb); 430 + return -ENOMEM; 431 + } 432 + 433 + pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); 434 + 435 + urb->dev = data->udev; 436 + urb->pipe = pipe; 437 + urb->context = hdev; 438 + urb->complete = btusb_isoc_complete; 439 + urb->interval = data->isoc_rx_ep->bInterval; 440 + 441 + urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; 442 + urb->transfer_buffer = buf; 443 + urb->transfer_buffer_length = size; 444 + 445 + __fill_isoc_descriptor(urb, size, 446 + le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); 447 + 448 + usb_anchor_urb(urb, &data->isoc_anchor); 347 449 348 450 err = usb_submit_urb(urb, GFP_KERNEL); 349 451 if (err < 0) { ··· 532 392 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 533 393 return 0; 534 394 395 + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 396 + usb_kill_anchored_urbs(&data->intr_anchor); 397 + 535 398 clear_bit(BTUSB_BULK_RUNNING, &data->flags); 536 399 usb_kill_anchored_urbs(&data->bulk_anchor); 537 400 ··· 596 453 break; 597 454 598 455 case HCI_ACLDATA_PKT: 456 + if (!data->bulk_tx_ep || hdev->conn_hash.acl_num < 1) 457 + return -ENODEV; 458 + 599 459 urb = usb_alloc_urb(0, GFP_ATOMIC); 600 460 if (!urb) 601 461 return -ENOMEM; ··· 613 467 break; 614 468 615 469 case HCI_SCODATA_PKT: 470 + if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) 471 + return -ENODEV; 472 + 473 + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); 474 + if (!urb) 475 + return -ENOMEM; 476 + 477 + pipe = usb_sndisocpipe(data->udev, 478 + data->isoc_tx_ep->bEndpointAddress); 479 + 480 + urb->dev = data->udev; 481 + urb->pipe = pipe; 482 + urb->context 
= skb; 483 + urb->complete = btusb_tx_complete; 484 + urb->interval = data->isoc_tx_ep->bInterval; 485 + 486 + urb->transfer_flags = URB_ISO_ASAP; 487 + urb->transfer_buffer = skb->data; 488 + urb->transfer_buffer_length = skb->len; 489 + 490 + __fill_isoc_descriptor(urb, skb->len, 491 + le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); 492 + 616 493 hdev->stat.sco_tx++; 617 - kfree_skb(skb); 618 - return 0; 494 + break; 619 495 620 496 default: 621 497 return -EILSEQ; ··· 676 508 schedule_work(&data->work); 677 509 } 678 510 511 + static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting) 512 + { 513 + struct btusb_data *data = hdev->driver_data; 514 + struct usb_interface *intf = data->isoc; 515 + struct usb_endpoint_descriptor *ep_desc; 516 + int i, err; 517 + 518 + if (!data->isoc) 519 + return -ENODEV; 520 + 521 + err = usb_set_interface(data->udev, 1, altsetting); 522 + if (err < 0) { 523 + BT_ERR("%s setting interface failed (%d)", hdev->name, -err); 524 + return err; 525 + } 526 + 527 + data->isoc_altsetting = altsetting; 528 + 529 + data->isoc_tx_ep = NULL; 530 + data->isoc_rx_ep = NULL; 531 + 532 + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { 533 + ep_desc = &intf->cur_altsetting->endpoint[i].desc; 534 + 535 + if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { 536 + data->isoc_tx_ep = ep_desc; 537 + continue; 538 + } 539 + 540 + if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { 541 + data->isoc_rx_ep = ep_desc; 542 + continue; 543 + } 544 + } 545 + 546 + if (!data->isoc_tx_ep || !data->isoc_rx_ep) { 547 + BT_ERR("%s invalid SCO descriptors", hdev->name); 548 + return -ENODEV; 549 + } 550 + 551 + return 0; 552 + } 553 + 679 554 static void btusb_work(struct work_struct *work) 680 555 { 681 556 struct btusb_data *data = container_of(work, struct btusb_data, work); 682 557 struct hci_dev *hdev = data->hdev; 683 558 684 - if (hdev->conn_hash.acl_num == 0) { 559 + if (hdev->conn_hash.acl_num > 0) { 560 + if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) { 561 + if (btusb_submit_bulk_urb(hdev) < 0) 562 + clear_bit(BTUSB_BULK_RUNNING, &data->flags); 563 + else 564 + btusb_submit_bulk_urb(hdev); 565 + } 566 + } else { 685 567 clear_bit(BTUSB_BULK_RUNNING, &data->flags); 686 568 usb_kill_anchored_urbs(&data->bulk_anchor); 687 - return; 688 569 } 689 570 690 - if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) { 691 - if (btusb_submit_bulk_urb(hdev) < 0) 692 - clear_bit(BTUSB_BULK_RUNNING, &data->flags); 693 - else 694 - btusb_submit_bulk_urb(hdev); 571 + if (hdev->conn_hash.sco_num > 0) { 572 + if (data->isoc_altsetting != 2) { 573 + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 574 + usb_kill_anchored_urbs(&data->isoc_anchor); 575 + 576 + if (__set_isoc_interface(hdev, 2) < 0) 577 + return; 578 + } 579 + 580 + if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { 581 + if (btusb_submit_isoc_urb(hdev) < 0) 582 + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 583 + else 584 + btusb_submit_isoc_urb(hdev); 585 + } 586 + } else { 587 + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 588 + usb_kill_anchored_urbs(&data->isoc_anchor); 589 + 590 + __set_isoc_interface(hdev, 0); 695 591 } 696 592 } 697 593 ··· 829 597 init_usb_anchor(&data->tx_anchor); 830 598 init_usb_anchor(&data->intr_anchor); 831 599 init_usb_anchor(&data->bulk_anchor); 600 + init_usb_anchor(&data->isoc_anchor); 832 601 833 602 hdev = hci_alloc_dev(); 834 603 if (!hdev) { ··· 853 620 854 621 hdev->owner = THIS_MODULE; 855 622 623 + /* interface numbers are 
hardcoded in the spec */ 624 + data->isoc = usb_ifnum_to_if(data->udev, 1); 625 + 856 626 if (reset || id->driver_info & BTUSB_RESET) 857 627 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); 858 628 ··· 864 628 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); 865 629 } 866 630 631 + if (id->driver_info & BTUSB_BROKEN_ISOC) 632 + data->isoc = NULL; 633 + 867 634 if (id->driver_info & BTUSB_SNIFFER) { 868 - struct usb_device *udev = interface_to_usbdev(intf); 635 + struct usb_device *udev = data->udev; 869 636 870 637 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) 871 638 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); 639 + 640 + data->isoc = NULL; 872 641 } 873 642 874 643 if (id->driver_info & BTUSB_BCM92035) { ··· 884 643 if (skb) { 885 644 memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); 886 645 skb_queue_tail(&hdev->driver_init, skb); 646 + } 647 + } 648 + 649 + if (data->isoc) { 650 + err = usb_driver_claim_interface(&btusb_driver, 651 + data->isoc, NULL); 652 + if (err < 0) { 653 + hci_free_dev(hdev); 654 + kfree(data); 655 + return err; 887 656 } 888 657 } 889 658 ··· 920 669 return; 921 670 922 671 hdev = data->hdev; 672 + 673 + if (data->isoc) 674 + usb_driver_release_interface(&btusb_driver, data->isoc); 923 675 924 676 usb_set_intfdata(intf, NULL); 925 677
+1 -1
drivers/bluetooth/hci_ldisc.c
··· 577 577 module_param(reset, bool, 0644); 578 578 MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); 579 579 580 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 580 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 581 581 MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION); 582 582 MODULE_VERSION(VERSION); 583 583 MODULE_LICENSE("GPL");
+1 -1
drivers/bluetooth/hci_usb.c
··· 1130 1130 MODULE_PARM_DESC(isoc, "Set isochronous transfers for SCO over HCI support"); 1131 1131 #endif 1132 1132 1133 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 1133 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 1134 1134 MODULE_DESCRIPTION("Bluetooth HCI USB driver ver " VERSION); 1135 1135 MODULE_VERSION(VERSION); 1136 1136 MODULE_LICENSE("GPL");
+1 -1
drivers/bluetooth/hci_vhci.c
··· 377 377 module_param(minor, int, 0444); 378 378 MODULE_PARM_DESC(minor, "Miscellaneous minor device number"); 379 379 380 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 380 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 381 381 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); 382 382 MODULE_VERSION(VERSION); 383 383 MODULE_LICENSE("GPL");
-7
drivers/cdrom/cdrom.c
··· 408 408 ENSURE(get_last_session, CDC_MULTI_SESSION); 409 409 ENSURE(get_mcn, CDC_MCN); 410 410 ENSURE(reset, CDC_RESET); 411 - ENSURE(audio_ioctl, CDC_PLAY_AUDIO); 412 411 ENSURE(generic_packet, CDC_GENERIC_PACKET); 413 412 cdi->mc_flags = 0; 414 413 cdo->n_minors = 0; ··· 2505 2506 2506 2507 /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ 2507 2508 2508 - if (!CDROM_CAN(CDC_PLAY_AUDIO)) 2509 - return -ENOSYS; 2510 2509 if (copy_from_user(&q, argp, sizeof(q))) 2511 2510 return -EFAULT; 2512 2511 ··· 2535 2538 2536 2539 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ 2537 2540 2538 - if (!CDROM_CAN(CDC_PLAY_AUDIO)) 2539 - return -ENOSYS; 2540 2541 if (copy_from_user(&header, argp, sizeof(header))) 2541 2542 return -EFAULT; 2542 2543 ··· 2557 2562 2558 2563 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ 2559 2564 2560 - if (!CDROM_CAN(CDC_PLAY_AUDIO)) 2561 - return -ENOSYS; 2562 2565 if (copy_from_user(&entry, argp, sizeof(entry))) 2563 2566 return -EFAULT; 2564 2567
+7
drivers/cdrom/gdrom.c
··· 471 471 return err; 472 472 } 473 473 474 + static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, 475 + void *arg) 476 + { 477 + return -EINVAL; 478 + } 479 + 474 480 static struct cdrom_device_ops gdrom_ops = { 475 481 .open = gdrom_open, 476 482 .release = gdrom_release, ··· 484 478 .media_changed = gdrom_mediachanged, 485 479 .get_last_session = gdrom_get_last_session, 486 480 .reset = gdrom_hardreset, 481 + .audio_ioctl = gdrom_audio_ioctl, 487 482 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | 488 483 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, 489 484 .n_minors = 1,
+7
drivers/cdrom/viocd.c
··· 550 550 } 551 551 } 552 552 553 + static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, 554 + void *arg) 555 + { 556 + return -EINVAL; 557 + } 558 + 553 559 static struct cdrom_device_ops viocd_dops = { 554 560 .open = viocd_open, 555 561 .release = viocd_release, 556 562 .media_changed = viocd_media_changed, 557 563 .lock_door = viocd_lock_door, 558 564 .generic_packet = viocd_packet, 565 + .audio_ioctl = viocd_audio_ioctl, 559 566 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM 560 567 }; 561 568
+3 -5
drivers/char/ipmi/ipmi_si_intf.c
··· 2695 2695 for (i = 0; ; i++) { 2696 2696 if (!ipmi_defaults[i].port) 2697 2697 break; 2698 - 2699 - info = kzalloc(sizeof(*info), GFP_KERNEL); 2700 - if (!info) 2701 - return; 2702 - 2703 2698 #ifdef CONFIG_PPC_MERGE 2704 2699 if (check_legacy_ioport(ipmi_defaults[i].port)) 2705 2700 continue; 2706 2701 #endif 2702 + info = kzalloc(sizeof(*info), GFP_KERNEL); 2703 + if (!info) 2704 + return; 2707 2705 2708 2706 info->addr_source = NULL; 2709 2707
+1
drivers/char/random.c
··· 1571 1571 1572 1572 return half_md4_transform(hash, keyptr->secret); 1573 1573 } 1574 + EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); 1574 1575 1575 1576 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1576 1577 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
-1
drivers/char/xilinx_hwicap/buffer_icap.h
··· 38 38 39 39 #include <linux/types.h> 40 40 #include <linux/cdev.h> 41 - #include <linux/version.h> 42 41 #include <linux/platform_device.h> 43 42 44 43 #include <asm/io.h>
-1
drivers/char/xilinx_hwicap/fifo_icap.h
··· 38 38 39 39 #include <linux/types.h> 40 40 #include <linux/cdev.h> 41 - #include <linux/version.h> 42 41 #include <linux/platform_device.h> 43 42 44 43 #include <asm/io.h>
-1
drivers/char/xilinx_hwicap/xilinx_hwicap.h
··· 38 38 39 39 #include <linux/types.h> 40 40 #include <linux/cdev.h> 41 - #include <linux/version.h> 42 41 #include <linux/platform_device.h> 43 42 44 43 #include <asm/io.h>
-1
drivers/edac/edac_core.h
··· 34 34 #include <linux/platform_device.h> 35 35 #include <linux/sysdev.h> 36 36 #include <linux/workqueue.h> 37 - #include <linux/version.h> 38 37 39 38 #define EDAC_MC_LABEL_LEN 31 40 39 #define EDAC_DEVICE_NAME_LEN 31
+2 -2
drivers/firewire/Kconfig
··· 12 12 This is the "Juju" FireWire stack, a new alternative implementation 13 13 designed for robustness and simplicity. You can build either this 14 14 stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both. 15 - Please read http://wiki.linux1394.org/JujuMigration before you 16 - enable the new stack. 15 + Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration 16 + before you enable the new stack. 17 17 18 18 To compile this driver as a module, say M here: the module will be 19 19 called firewire-core.
-1
drivers/i2c/busses/i2c-at91.c
··· 14 14 */ 15 15 16 16 #include <linux/module.h> 17 - #include <linux/version.h> 18 17 #include <linux/kernel.h> 19 18 #include <linux/err.h> 20 19 #include <linux/slab.h>
+10 -10
drivers/ide/ide-cd.c
··· 1272 1272 */ 1273 1273 static void msf_from_bcd(struct atapi_msf *msf) 1274 1274 { 1275 - msf->minute = BCD2BIN(msf->minute); 1276 - msf->second = BCD2BIN(msf->second); 1277 - msf->frame = BCD2BIN(msf->frame); 1275 + msf->minute = bcd2bin(msf->minute); 1276 + msf->second = bcd2bin(msf->second); 1277 + msf->frame = bcd2bin(msf->frame); 1278 1278 } 1279 1279 1280 1280 int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) ··· 1415 1415 return stat; 1416 1416 1417 1417 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { 1418 - toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1419 - toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1418 + toc->hdr.first_track = bcd2bin(toc->hdr.first_track); 1419 + toc->hdr.last_track = bcd2bin(toc->hdr.last_track); 1420 1420 } 1421 1421 1422 1422 ntracks = toc->hdr.last_track - toc->hdr.first_track + 1; ··· 1456 1456 return stat; 1457 1457 1458 1458 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { 1459 - toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); 1460 - toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); 1459 + toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT); 1460 + toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT); 1461 1461 } else { 1462 1462 toc->hdr.first_track = CDROM_LEADOUT; 1463 1463 toc->hdr.last_track = CDROM_LEADOUT; ··· 1470 1470 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); 1471 1471 1472 1472 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { 1473 - toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1474 - toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1473 + toc->hdr.first_track = bcd2bin(toc->hdr.first_track); 1474 + toc->hdr.last_track = bcd2bin(toc->hdr.last_track); 1475 1475 } 1476 1476 1477 1477 for (i = 0; i <= ntracks; i++) { 1478 1478 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { 1479 1479 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) 1480 - toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1480 + toc->ent[i].track = bcd2bin(toc->ent[i].track); 1481 1481 msf_from_bcd(&toc->ent[i].addr.msf); 1482 1482 } 1483 1483 toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
+1 -1
drivers/ide/pci/aec62xx.c
··· 307 307 .name = "AEC62xx_IDE", 308 308 .id_table = aec62xx_pci_tbl, 309 309 .probe = aec62xx_init_one, 310 - .remove = aec62xx_remove, 310 + .remove = __devexit_p(aec62xx_remove), 311 311 }; 312 312 313 313 static int __init aec62xx_ide_init(void)
+1 -1
drivers/ide/pci/cy82c693.c
··· 447 447 .name = "Cypress_IDE", 448 448 .id_table = cy82c693_pci_tbl, 449 449 .probe = cy82c693_init_one, 450 - .remove = cy82c693_remove, 450 + .remove = __devexit_p(cy82c693_remove), 451 451 }; 452 452 453 453 static int __init cy82c693_ide_init(void)
+1 -1
drivers/ide/pci/hpt366.c
··· 1620 1620 .name = "HPT366_IDE", 1621 1621 .id_table = hpt366_pci_tbl, 1622 1622 .probe = hpt366_init_one, 1623 - .remove = hpt366_remove, 1623 + .remove = __devexit_p(hpt366_remove), 1624 1624 }; 1625 1625 1626 1626 static int __init hpt366_ide_init(void)
+1 -1
drivers/ide/pci/it821x.c
··· 686 686 .name = "ITE821x IDE", 687 687 .id_table = it821x_pci_tbl, 688 688 .probe = it821x_init_one, 689 - .remove = it821x_remove, 689 + .remove = __devexit_p(it821x_remove), 690 690 }; 691 691 692 692 static int __init it821x_ide_init(void)
+1 -1
drivers/ide/pci/pdc202xx_new.c
··· 566 566 .name = "Promise_IDE", 567 567 .id_table = pdc202new_pci_tbl, 568 568 .probe = pdc202new_init_one, 569 - .remove = pdc202new_remove, 569 + .remove = __devexit_p(pdc202new_remove), 570 570 }; 571 571 572 572 static int __init pdc202new_ide_init(void)
+1 -1
drivers/ide/pci/scc_pata.c
··· 954 954 .name = "SCC IDE", 955 955 .id_table = scc_pci_tbl, 956 956 .probe = scc_init_one, 957 - .remove = scc_remove, 957 + .remove = __devexit_p(scc_remove), 958 958 }; 959 959 960 960 static int scc_ide_init(void)
+2 -2
drivers/ide/pci/sgiioc4.c
··· 621 621 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, 622 622 DRV_NAME)) { 623 623 printk(KERN_ERR 624 - "%s : %s -- ERROR, Addresses " 624 + "%s %s: -- ERROR, Addresses " 625 625 "0x%p to 0x%p ALREADY in use\n", 626 - __func__, DRV_NAME, (void *) cmd_phys_base, 626 + DRV_NAME, pci_name(dev), (void *)cmd_phys_base, 627 627 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); 628 628 return -ENOMEM; 629 629 }
+1 -1
drivers/ide/pci/siimage.c
··· 832 832 .name = "SiI_IDE", 833 833 .id_table = siimage_pci_tbl, 834 834 .probe = siimage_init_one, 835 - .remove = siimage_remove, 835 + .remove = __devexit_p(siimage_remove), 836 836 }; 837 837 838 838 static int __init siimage_ide_init(void)
+1 -1
drivers/ide/pci/sis5513.c
··· 610 610 .name = "SIS_IDE", 611 611 .id_table = sis5513_pci_tbl, 612 612 .probe = sis5513_init_one, 613 - .remove = sis5513_remove, 613 + .remove = __devexit_p(sis5513_remove), 614 614 }; 615 615 616 616 static int __init sis5513_ide_init(void)
+1 -1
drivers/ide/pci/tc86c001.c
··· 249 249 .name = "TC86C001", 250 250 .id_table = tc86c001_pci_tbl, 251 251 .probe = tc86c001_init_one, 252 - .remove = tc86c001_remove, 252 + .remove = __devexit_p(tc86c001_remove), 253 253 }; 254 254 255 255 static int __init tc86c001_ide_init(void)
+1 -1
drivers/ide/pci/via82cxxx.c
··· 491 491 .name = "VIA_IDE", 492 492 .id_table = via_pci_tbl, 493 493 .probe = via_init_one, 494 - .remove = via_remove, 494 + .remove = __devexit_p(via_remove), 495 495 }; 496 496 497 497 static int __init via_ide_init(void)
+33 -30
drivers/ieee1394/nodemgr.c
··· 844 844 ne->host = host; 845 845 ne->nodeid = nodeid; 846 846 ne->generation = generation; 847 - ne->needs_probe = 1; 847 + ne->needs_probe = true; 848 848 849 849 ne->guid = guid; 850 850 ne->guid_vendor_id = (guid >> 40) & 0xffffff; ··· 1144 1144 struct csr1212_keyval *kv, *vendor_name_kv = NULL; 1145 1145 u8 last_key_id = 0; 1146 1146 1147 - ne->needs_probe = 0; 1147 + ne->needs_probe = false; 1148 1148 1149 1149 csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) { 1150 1150 switch (kv->key.id) { ··· 1295 1295 nodemgr_update_bus_options(ne); 1296 1296 1297 1297 /* Mark the node as new, so it gets re-probed */ 1298 - ne->needs_probe = 1; 1298 + ne->needs_probe = true; 1299 1299 } else { 1300 1300 /* old cache is valid, so update its generation */ 1301 1301 struct nodemgr_csr_info *ci = ne->csr->private; ··· 1566 1566 struct probe_param { 1567 1567 struct host_info *hi; 1568 1568 int generation; 1569 + bool probe_now; 1569 1570 }; 1570 1571 1571 - static int __nodemgr_node_probe(struct device *dev, void *data) 1572 + static int node_probe(struct device *dev, void *data) 1572 1573 { 1573 - struct probe_param *param = (struct probe_param *)data; 1574 + struct probe_param *p = data; 1574 1575 struct node_entry *ne; 1575 1576 1577 + if (p->generation != get_hpsb_generation(p->hi->host)) 1578 + return -EAGAIN; 1579 + 1576 1580 ne = container_of(dev, struct node_entry, node_dev); 1577 - if (!ne->needs_probe) 1578 - nodemgr_probe_ne(param->hi, ne, param->generation); 1579 - if (ne->needs_probe) 1580 - nodemgr_probe_ne(param->hi, ne, param->generation); 1581 + if (ne->needs_probe == p->probe_now) 1582 + nodemgr_probe_ne(p->hi, ne, p->generation); 1581 1583 return 0; 1582 1584 } 1583 1585 1584 1586 static void nodemgr_node_probe(struct host_info *hi, int generation) 1585 1587 { 1586 - struct hpsb_host *host = hi->host; 1587 - struct probe_param param; 1588 + struct probe_param p; 1588 1589 1589 - param.hi = hi; 1590 - param.generation = generation; 1591 - /* Do some processing of the nodes we've probed. This pulls them 1590 + p.hi = hi; 1591 + p.generation = generation; 1592 + /* 1593 + * Do some processing of the nodes we've probed. This pulls them 1592 1594 * into the sysfs layer if needed, and can result in processing of 1593 1595 * unit-directories, or just updating the node and it's 1594 1596 * unit-directories. 1595 1597 * 1596 1598 * Run updates before probes. Usually, updates are time-critical 1597 - * while probes are time-consuming. (Well, those probes need some 1598 - * improvement...) */ 1599 - 1600 - class_for_each_device(&nodemgr_ne_class, NULL, &param, 1601 - __nodemgr_node_probe); 1602 - 1603 - /* If we had a bus reset while we were scanning the bus, it is 1604 - * possible that we did not probe all nodes. In that case, we 1605 - * skip the clean up for now, since we could remove nodes that 1606 - * were still on the bus. Another bus scan is pending which will 1607 - * do the clean up eventually. 1599 + * while probes are time-consuming. 1608 1600 * 1601 + * Meanwhile, another bus reset may have happened. In this case we 1602 + * skip everything here and let the next bus scan handle it. 1603 + * Otherwise we may prematurely remove nodes which are still there. 
1604 + */ 1605 + p.probe_now = false; 1606 + if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0) 1607 + return; 1608 + 1609 + p.probe_now = true; 1610 + if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0) 1611 + return; 1612 + /* 1609 1613 * Now let's tell the bus to rescan our devices. This may seem 1610 1614 * like overhead, but the driver-model core will only scan a 1611 1615 * device for a driver when either the device is added, or when a 1612 1616 * new driver is added. A bus reset is a good reason to rescan 1613 1617 * devices that were there before. For example, an sbp2 device 1614 1618 * may become available for login, if the host that held it was 1615 - * just removed. */ 1616 - 1617 - if (generation == get_hpsb_generation(host)) 1618 - if (bus_rescan_devices(&ieee1394_bus_type)) 1619 - HPSB_DEBUG("bus_rescan_devices had an error"); 1619 + * just removed. 1620 + */ 1621 + if (bus_rescan_devices(&ieee1394_bus_type) != 0) 1622 + HPSB_DEBUG("bus_rescan_devices had an error"); 1620 1623 } 1621 1624 1622 1625 static int nodemgr_send_resume_packet(struct hpsb_host *host)
+1 -1
drivers/ieee1394/nodemgr.h
··· 97 97 struct hpsb_host *host; /* Host this node is attached to */ 98 98 nodeid_t nodeid; /* NodeID */ 99 99 struct bus_options busopt; /* Bus Options */ 100 - int needs_probe; 100 + bool needs_probe; 101 101 unsigned int generation; /* Synced with hpsb generation */ 102 102 103 103 /* The following is read from the config rom */
+18 -7
drivers/ieee1394/sbp2.c
··· 731 731 { 732 732 struct sbp2_lu *lu = ud->device.driver_data; 733 733 734 - if (sbp2_reconnect_device(lu)) { 735 - /* Reconnect has failed. Perhaps we didn't reconnect fast 736 - * enough. Try a regular login, but first log out just in 737 - * case of any weirdness. */ 734 + if (sbp2_reconnect_device(lu) != 0) { 735 + /* 736 + * Reconnect failed. If another bus reset happened, 737 + * let nodemgr proceed and call sbp2_update again later 738 + * (or sbp2_remove if this node went away). 739 + */ 740 + if (!hpsb_node_entry_valid(lu->ne)) 741 + return 0; 742 + /* 743 + * Or the target rejected the reconnect because we weren't 744 + * fast enough. Try a regular login, but first log out 745 + * just in case of any weirdness. 746 + */ 738 747 sbp2_logout_device(lu); 739 748 740 - if (sbp2_login_device(lu)) { 741 - /* Login failed too, just fail, and the backend 742 - * will call our sbp2_remove for us */ 749 + if (sbp2_login_device(lu) != 0) { 750 + if (!hpsb_node_entry_valid(lu->ne)) 751 + return 0; 752 + 753 + /* Maybe another initiator won the login. */ 743 754 SBP2_ERR("Failed to reconnect to sbp2 device!"); 744 755 return -EBUSY; 745 756 }
-1
drivers/infiniband/hw/ehca/ehca_tools.h
··· 54 54 #include <linux/module.h> 55 55 #include <linux/moduleparam.h> 56 56 #include <linux/vmalloc.h> 57 - #include <linux/version.h> 58 57 #include <linux/notifier.h> 59 58 #include <linux/cpu.h> 60 59 #include <linux/device.h>
-1
drivers/infiniband/hw/ipath/ipath_fs.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 - #include <linux/version.h> 35 34 #include <linux/module.h> 36 35 #include <linux/fs.h> 37 36 #include <linux/mount.h>
+1 -1
drivers/infiniband/hw/ipath/ipath_iba7220.c
··· 1720 1720 "not 2KB aligned!\n", pa); 1721 1721 return; 1722 1722 } 1723 - if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) { 1723 + if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { 1724 1724 ipath_dev_err(dd, 1725 1725 "BUG: Physical page address 0x%lx " 1726 1726 "larger than supported\n", pa);
+6 -2
drivers/infiniband/hw/ipath/ipath_ud.c
··· 267 267 u16 lrh0; 268 268 u16 lid; 269 269 int ret = 0; 270 + int next_cur; 270 271 271 272 spin_lock_irqsave(&qp->s_lock, flags); 272 273 ··· 291 290 goto bail; 292 291 293 292 wqe = get_swqe_ptr(qp, qp->s_cur); 294 - if (++qp->s_cur >= qp->s_size) 295 - qp->s_cur = 0; 293 + next_cur = qp->s_cur + 1; 294 + if (next_cur >= qp->s_size) 295 + next_cur = 0; 296 296 297 297 /* Construct the header. */ 298 298 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; ··· 317 315 qp->s_flags |= IPATH_S_WAIT_DMA; 318 316 goto bail; 319 317 } 318 + qp->s_cur = next_cur; 320 319 spin_unlock_irqrestore(&qp->s_lock, flags); 321 320 ipath_ud_loopback(qp, wqe); 322 321 spin_lock_irqsave(&qp->s_lock, flags); ··· 326 323 } 327 324 } 328 325 326 + qp->s_cur = next_cur; 329 327 extra_bytes = -wqe->length & 3; 330 328 nwords = (wqe->length + extra_bytes) >> 2; 331 329
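
The ipath_ud change computes next_cur up front but only commits it to qp->s_cur once it is certain the work request will actually be consumed, so bailing out on a busy DMA engine no longer skips a slot. A small self-contained sketch of that compute-early/commit-late ring pattern (the ring and the busy flag are hypothetical, not the ipath structures):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct ring {
	int cur;			/* index of the next entry to consume */
	int entries[RING_SIZE];
};

static bool engine_busy;		/* stand-in for the IPATH_S_WAIT_DMA case */

/* Returns 0 and advances r->cur only if the entry was really consumed. */
static int consume_one(struct ring *r)
{
	int next_cur = r->cur + 1;

	if (next_cur >= RING_SIZE)
		next_cur = 0;

	if (engine_busy)
		return -1;		/* bail out: r->cur untouched, nothing is lost */

	printf("consumed entry %d from slot %d\n", r->entries[r->cur], r->cur);
	r->cur = next_cur;		/* commit the advance only on success */
	return 0;
}

int main(void)
{
	struct ring r = { .cur = 0, .entries = { 10, 11, 12, 13 } };

	engine_busy = true;
	if (consume_one(&r) != 0)
		printf("busy, slot %d will be retried later\n", r.cur);

	engine_busy = false;
	consume_one(&r);		/* same slot, consumed this time */
	return 0;
}
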
-1
drivers/infiniband/hw/nes/nes.h
··· 43 43 #include <linux/dma-mapping.h> 44 44 #include <linux/workqueue.h> 45 45 #include <linux/slab.h> 46 - #include <linux/version.h> 47 46 #include <asm/io.h> 48 47 #include <linux/crc32c.h> 49 48
+9 -10
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 156 156 157 157 netif_stop_queue(dev); 158 158 159 - /* 160 - * Now flush workqueue to make sure a scheduled task doesn't 161 - * bring our internal state back up. 162 - */ 163 - flush_workqueue(ipoib_workqueue); 164 - 165 - ipoib_ib_dev_down(dev, 1); 166 - ipoib_ib_dev_stop(dev, 1); 159 + ipoib_ib_dev_down(dev, 0); 160 + ipoib_ib_dev_stop(dev, 0); 167 161 168 162 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 169 163 struct ipoib_dev_priv *cpriv; ··· 1308 1314 1309 1315 register_failed: 1310 1316 ib_unregister_event_handler(&priv->event_handler); 1311 - flush_scheduled_work(); 1317 + flush_workqueue(ipoib_workqueue); 1312 1318 1313 1319 event_failed: 1314 1320 ipoib_dev_cleanup(priv->dev); ··· 1367 1373 1368 1374 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1369 1375 ib_unregister_event_handler(&priv->event_handler); 1370 - flush_scheduled_work(); 1376 + 1377 + rtnl_lock(); 1378 + dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); 1379 + rtnl_unlock(); 1380 + 1381 + flush_workqueue(ipoib_workqueue); 1371 1382 1372 1383 unregister_netdev(priv->dev); 1373 1384 ipoib_dev_cleanup(priv->dev);
+9 -1
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 392 392 &priv->mcast_task, 0); 393 393 mutex_unlock(&mcast_mutex); 394 394 395 - if (mcast == priv->broadcast) 395 + if (mcast == priv->broadcast) { 396 + /* 397 + * Take RTNL lock here to avoid racing with 398 + * ipoib_stop() and turning the carrier back 399 + * on while a device is being removed. 400 + */ 401 + rtnl_lock(); 396 402 netif_carrier_on(dev); 403 + rtnl_unlock(); 404 + } 397 405 398 406 return 0; 399 407 }
-1
drivers/infiniband/ulp/iser/iser_verbs.c
··· 33 33 #include <linux/kernel.h> 34 34 #include <linux/module.h> 35 35 #include <linux/delay.h> 36 - #include <linux/version.h> 37 36 38 37 #include "iscsi_iser.h" 39 38
+2 -2
drivers/input/evdev.c
··· 677 677 len = OLD_KEY_MAX; 678 678 if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000)) 679 679 printk(KERN_WARNING 680 - "evdev.c(EVIOCGBIT): Suspicious buffer size %d, " 681 - "limiting output to %d bytes. See " 680 + "evdev.c(EVIOCGBIT): Suspicious buffer size %u, " 681 + "limiting output to %zu bytes. See " 682 682 "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n", 683 683 OLD_KEY_MAX, 684 684 BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
-1
drivers/input/keyboard/bf54x-keys.c
··· 29 29 */ 30 30 31 31 #include <linux/module.h> 32 - #include <linux/version.h> 33 32 34 33 #include <linux/init.h> 35 34 #include <linux/fs.h>
-1
drivers/input/keyboard/gpio_keys.c
··· 9 9 */ 10 10 11 11 #include <linux/module.h> 12 - #include <linux/version.h> 13 12 14 13 #include <linux/init.h> 15 14 #include <linux/fs.h>
+3
drivers/input/misc/cobalt_btns.c
··· 148 148 return 0; 149 149 } 150 150 151 + MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 152 + MODULE_DESCRIPTION("Cobalt button interface driver"); 153 + MODULE_LICENSE("GPL"); 151 154 /* work with hotplug and coldplug */ 152 155 MODULE_ALIAS("platform:Cobalt buttons"); 153 156
-1
drivers/input/mouse/gpio_mouse.c
··· 9 9 */ 10 10 11 11 #include <linux/init.h> 12 - #include <linux/version.h> 13 12 #include <linux/module.h> 14 13 #include <linux/platform_device.h> 15 14 #include <linux/input-polldev.h>
-1
drivers/input/tablet/gtco.c
··· 64 64 #include <asm/byteorder.h> 65 65 66 66 67 - #include <linux/version.h> 68 67 #include <linux/usb/input.h> 69 68 70 69 /* Version with a Major number of 2 is for kernel inclusion only. */
-1
drivers/input/touchscreen/mainstone-wm97xx.c
··· 25 25 26 26 #include <linux/module.h> 27 27 #include <linux/moduleparam.h> 28 - #include <linux/version.h> 29 28 #include <linux/kernel.h> 30 29 #include <linux/init.h> 31 30 #include <linux/delay.h>
+9 -2
drivers/input/touchscreen/migor_ts.c
··· 173 173 input_set_abs_params(input, ABS_X, 95, 955, 0, 0); 174 174 input_set_abs_params(input, ABS_Y, 85, 935, 0, 0); 175 175 176 - input->name = client->driver_name; 176 + input->name = client->name; 177 177 input->id.bustype = BUS_I2C; 178 178 input->dev.parent = &client->dev; 179 179 ··· 192 192 goto err1; 193 193 194 194 error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW, 195 - client->driver_name, priv); 195 + client->name, priv); 196 196 if (error) { 197 197 dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); 198 198 goto err2; ··· 224 224 return 0; 225 225 } 226 226 227 + static const struct i2c_device_id migor_ts_id[] = { 228 + { "migor_ts", 0 }, 229 + { } 230 + }; 231 + MODULE_DEVICE_TABLE(i2c, migor_ts); 232 + 227 233 static struct i2c_driver migor_ts_driver = { 228 234 .driver = { 229 235 .name = "migor_ts", 230 236 }, 231 237 .probe = migor_ts_probe, 232 238 .remove = migor_ts_remove, 239 + .id_table = migor_ts_id, 233 240 }; 234 241 235 242 static int __init migor_ts_init(void)
-1
drivers/input/touchscreen/wm9705.c
··· 17 17 18 18 #include <linux/module.h> 19 19 #include <linux/moduleparam.h> 20 - #include <linux/version.h> 21 20 #include <linux/kernel.h> 22 21 #include <linux/input.h> 23 22 #include <linux/delay.h>
-1
drivers/input/touchscreen/wm9712.c
··· 17 17 18 18 #include <linux/module.h> 19 19 #include <linux/moduleparam.h> 20 - #include <linux/version.h> 21 20 #include <linux/kernel.h> 22 21 #include <linux/input.h> 23 22 #include <linux/delay.h>
-1
drivers/input/touchscreen/wm9713.c
··· 17 17 18 18 #include <linux/module.h> 19 19 #include <linux/moduleparam.h> 20 - #include <linux/version.h> 21 20 #include <linux/kernel.h> 22 21 #include <linux/input.h> 23 22 #include <linux/delay.h>
-1
drivers/input/touchscreen/wm97xx-core.c
··· 37 37 38 38 #include <linux/module.h> 39 39 #include <linux/moduleparam.h> 40 - #include <linux/version.h> 41 40 #include <linux/kernel.h> 42 41 #include <linux/init.h> 43 42 #include <linux/delay.h>
-1
drivers/mfd/asic3.c
··· 16 16 * 17 17 */ 18 18 19 - #include <linux/version.h> 20 19 #include <linux/kernel.h> 21 20 #include <linux/irq.h> 22 21 #include <linux/gpio.h>
+5
drivers/misc/acer-wmi.c
··· 192 192 193 193 static void set_quirks(void) 194 194 { 195 + if (!interface) 196 + return; 197 + 195 198 if (quirks->mailled) 196 199 interface->capability |= ACER_CAP_MAILLED; 197 200 ··· 1239 1236 "load\n"); 1240 1237 return -ENODEV; 1241 1238 } 1239 + 1240 + set_quirks(); 1242 1241 1243 1242 if (platform_driver_register(&acer_platform_driver)) { 1244 1243 printk(ACER_ERR "Unable to register platform driver.\n");
+1 -1
drivers/misc/eeepc-laptop.c
··· 553 553 hwmon = eeepc_hwmon_device; 554 554 if (!hwmon) 555 555 return ; 556 - hwmon_device_unregister(hwmon); 557 556 sysfs_remove_group(&hwmon->kobj, 558 557 &hwmon_attribute_group); 558 + hwmon_device_unregister(hwmon); 559 559 eeepc_hwmon_device = NULL; 560 560 } 561 561
-1
drivers/misc/eeprom_93cx6.c
··· 26 26 27 27 #include <linux/kernel.h> 28 28 #include <linux/module.h> 29 - #include <linux/version.h> 30 29 #include <linux/delay.h> 31 30 #include <linux/eeprom_93cx6.h> 32 31
-1
drivers/mtd/maps/amd76xrom.c
··· 6 6 7 7 #include <linux/module.h> 8 8 #include <linux/types.h> 9 - #include <linux/version.h> 10 9 #include <linux/kernel.h> 11 10 #include <linux/init.h> 12 11 #include <asm/io.h>
-1
drivers/mtd/maps/ck804xrom.c
··· 9 9 10 10 #include <linux/module.h> 11 11 #include <linux/types.h> 12 - #include <linux/version.h> 13 12 #include <linux/kernel.h> 14 13 #include <linux/init.h> 15 14 #include <asm/io.h>
-1
drivers/mtd/maps/esb2rom.c
··· 12 12 13 13 #include <linux/module.h> 14 14 #include <linux/types.h> 15 - #include <linux/version.h> 16 15 #include <linux/kernel.h> 17 16 #include <linux/init.h> 18 17 #include <asm/io.h>
-1
drivers/mtd/nand/au1550nd.c
··· 16 16 #include <linux/mtd/mtd.h> 17 17 #include <linux/mtd/nand.h> 18 18 #include <linux/mtd/partitions.h> 19 - #include <linux/version.h> 20 19 #include <asm/io.h> 21 20 22 21 #include <asm/mach-au1x00/au1xxx.h>
+1 -1
drivers/net/Kconfig
··· 1172 1172 1173 1173 config NE2000 1174 1174 tristate "NE2000/NE1000 support" 1175 - depends on NET_ISA || (Q40 && m) || M32R || TOSHIBA_RBTX4927 || TOSHIBA_RBTX4938 1175 + depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX 1176 1176 select CRC32 1177 1177 ---help--- 1178 1178 If you have a network (Ethernet) card of this type, say Y and read
-1
drivers/net/acenic.c
··· 52 52 53 53 #include <linux/module.h> 54 54 #include <linux/moduleparam.h> 55 - #include <linux/version.h> 56 55 #include <linux/types.h> 57 56 #include <linux/errno.h> 58 57 #include <linux/ioport.h>
+3 -3
drivers/net/arm/ixp4xx_eth.c
··· 551 551 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { 552 552 phys = dma_map_single(&dev->dev, skb->data, 553 553 RX_BUFF_SIZE, DMA_FROM_DEVICE); 554 - if (dma_mapping_error(phys)) { 554 + if (dma_mapping_error(&dev->dev, phys)) { 555 555 dev_kfree_skb(skb); 556 556 skb = NULL; 557 557 } ··· 698 698 #endif 699 699 700 700 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); 701 - if (dma_mapping_error(phys)) { 701 + if (dma_mapping_error(&dev->dev, phys)) { 702 702 #ifdef __ARMEB__ 703 703 dev_kfree_skb(skb); 704 704 #else ··· 883 883 desc->buf_len = MAX_MRU; 884 884 desc->data = dma_map_single(&port->netdev->dev, data, 885 885 RX_BUFF_SIZE, DMA_FROM_DEVICE); 886 - if (dma_mapping_error(desc->data)) { 886 + if (dma_mapping_error(&port->netdev->dev, desc->data)) { 887 887 free_buffer(buff); 888 888 return -EIO; 889 889 }
+1 -1
drivers/net/atl1e/atl1e_ethtool.c
··· 355 355 struct atl1e_adapter *adapter = netdev_priv(netdev); 356 356 357 357 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | 358 - WAKE_MCAST | WAKE_BCAST | WAKE_MCAST)) 358 + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) 359 359 return -EOPNOTSUPP; 360 360 /* these settings will always override what we currently have */ 361 361 adapter->wol = 0;
+1 -1
drivers/net/au1000_eth.c
··· 807 807 static int au1000_init(struct net_device *dev) 808 808 { 809 809 struct au1000_private *aup = (struct au1000_private *) dev->priv; 810 - u32 flags; 810 + unsigned long flags; 811 811 int i; 812 812 u32 control; 813 813
+2 -2
drivers/net/ax88796.c
··· 554 554 555 555 spin_lock_irqsave(&ax->mii_lock, flags); 556 556 mii_ethtool_gset(&ax->mii, cmd); 557 - spin_lock_irqsave(&ax->mii_lock, flags); 557 + spin_unlock_irqrestore(&ax->mii_lock, flags); 558 558 559 559 return 0; 560 560 } ··· 567 567 568 568 spin_lock_irqsave(&ax->mii_lock, flags); 569 569 rc = mii_ethtool_sset(&ax->mii, cmd); 570 - spin_lock_irqsave(&ax->mii_lock, flags); 570 + spin_unlock_irqrestore(&ax->mii_lock, flags); 571 571 572 572 return rc; 573 573 }
+34 -13
drivers/net/bnx2.c
··· 35 35 #include <linux/time.h> 36 36 #include <linux/ethtool.h> 37 37 #include <linux/mii.h> 38 - #ifdef NETIF_F_HW_VLAN_TX 39 38 #include <linux/if_vlan.h> 39 + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 40 40 #define BCM_VLAN 1 41 41 #endif 42 42 #include <net/ip.h> ··· 57 57 58 58 #define DRV_MODULE_NAME "bnx2" 59 59 #define PFX DRV_MODULE_NAME ": " 60 - #define DRV_MODULE_VERSION "1.7.9" 61 - #define DRV_MODULE_RELDATE "July 18, 2008" 60 + #define DRV_MODULE_VERSION "1.8.0" 61 + #define DRV_MODULE_RELDATE "Aug 14, 2008" 62 62 63 63 #define RUN_AT(x) (jiffies + (x)) 64 64 ··· 2876 2876 struct sw_bd *rx_buf; 2877 2877 struct sk_buff *skb; 2878 2878 dma_addr_t dma_addr; 2879 + u16 vtag = 0; 2880 + int hw_vlan __maybe_unused = 0; 2879 2881 2880 2882 sw_ring_cons = RX_RING_IDX(sw_cons); 2881 2883 sw_ring_prod = RX_RING_IDX(sw_prod); ··· 2921 2919 if (len <= bp->rx_copy_thresh) { 2922 2920 struct sk_buff *new_skb; 2923 2921 2924 - new_skb = netdev_alloc_skb(bp->dev, len + 2); 2922 + new_skb = netdev_alloc_skb(bp->dev, len + 6); 2925 2923 if (new_skb == NULL) { 2926 2924 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, 2927 2925 sw_ring_prod); ··· 2930 2928 2931 2929 /* aligned copy */ 2932 2930 skb_copy_from_linear_data_offset(skb, 2933 - BNX2_RX_OFFSET - 2, 2934 - new_skb->data, len + 2); 2935 - skb_reserve(new_skb, 2); 2931 + BNX2_RX_OFFSET - 6, 2932 + new_skb->data, len + 6); 2933 + skb_reserve(new_skb, 6); 2936 2934 skb_put(new_skb, len); 2937 2935 2938 2936 bnx2_reuse_rx_skb(bp, rxr, skb, ··· 2942 2940 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len, 2943 2941 dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) 2944 2942 goto next_rx; 2943 + 2944 + if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && 2945 + !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) { 2946 + vtag = rx_hdr->l2_fhdr_vlan_tag; 2947 + #ifdef BCM_VLAN 2948 + if (bp->vlgrp) 2949 + hw_vlan = 1; 2950 + else 2951 + #endif 2952 + { 2953 + struct vlan_ethhdr *ve = (struct vlan_ethhdr *) 2954 + __skb_push(skb, 4); 2955 + 2956 + memmove(ve, skb->data + 4, ETH_ALEN * 2); 2957 + ve->h_vlan_proto = htons(ETH_P_8021Q); 2958 + ve->h_vlan_TCI = htons(vtag); 2959 + len += 4; 2960 + } 2961 + } 2945 2962 2946 2963 skb->protocol = eth_type_trans(skb, bp->dev); 2947 2964 ··· 2983 2962 } 2984 2963 2985 2964 #ifdef BCM_VLAN 2986 - if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) { 2987 - vlan_hwaccel_receive_skb(skb, bp->vlgrp, 2988 - rx_hdr->l2_fhdr_vlan_tag); 2989 - } 2965 + if (hw_vlan) 2966 + vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); 2990 2967 else 2991 2968 #endif 2992 2969 netif_receive_skb(skb); ··· 3256 3237 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 3257 3238 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; 3258 3239 #ifdef BCM_VLAN 3259 - if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE)) 3240 + if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) 3260 3241 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; 3261 3242 #else 3262 - if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) 3243 + if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) 3263 3244 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; 3264 3245 #endif 3265 3246 if (dev->flags & IFF_PROMISC) { ··· 5982 5963 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 5983 5964 } 5984 5965 5966 + #ifdef BCM_VLAN 5985 5967 if (bp->vlgrp && vlan_tx_tag_present(skb)) { 5986 5968 vlan_tag_flags |= 5987 5969 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 5988 5970 } 5971 + #endif 5989 5972 if ((mss = skb_shinfo(skb)->gso_size)) { 5990 5973 u32 tcp_opt_len, ip_tcp_len; 5991 5974 struct 
iphdr *iph;
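
When no VLAN group is registered, the bnx2 change keeps the tag by rebuilding the 802.1Q header in the packet data itself: push four bytes of headroom, slide the two MAC addresses down, then write the TPID and the tag where the EtherType used to start. The same surgery on a plain byte buffer, outside of any skb (buffer layout and test addresses are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htons() */

#define ETH_ALEN	6
#define ETH_P_8021Q	0x8100
#define VLAN_HLEN	4

/* frame points at the destination MAC and must have at least VLAN_HLEN
 * bytes of headroom in front of it; returns the new start of the frame. */
static uint8_t *insert_vlan_tag(uint8_t *frame, uint16_t vlan_tci)
{
	uint8_t *new_start = frame - VLAN_HLEN;		/* __skb_push(skb, 4) */
	uint16_t tpid = htons(ETH_P_8021Q);
	uint16_t tci = htons(vlan_tci);

	/* memmove(ve, skb->data + 4, ETH_ALEN * 2): slide both MAC addresses
	 * down, opening a 4-byte hole right after them for the VLAN header. */
	memmove(new_start, frame, 2 * ETH_ALEN);
	memcpy(new_start + 2 * ETH_ALEN, &tpid, sizeof(tpid));
	memcpy(new_start + 2 * ETH_ALEN + 2, &tci, sizeof(tci));
	return new_start;
}

int main(void)
{
	uint8_t buf[64] = { 0 };		/* 4 bytes headroom + frame */
	uint8_t *frame = buf + VLAN_HLEN;

	memcpy(frame, "\xaa\xbb\xcc\xdd\xee\xff", ETH_ALEN);		/* dst */
	memcpy(frame + ETH_ALEN, "\x11\x22\x33\x44\x55\x66", ETH_ALEN);	/* src */
	frame[12] = 0x08;						/* IPv4 */
	frame[13] = 0x00;

	frame = insert_vlan_tag(frame, 100);
	printf("TPID 0x%02x%02x, TCI %u, inner type 0x%02x%02x\n",
	       frame[12], frame[13], (frame[14] << 8) | frame[15],
	       frame[16], frame[17]);
	return 0;
}

This push needs four spare bytes in front of the frame, which appears to be why the copybreak path in the same hunk now copies and reserves six bytes ahead of the payload instead of two.
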
-1
drivers/net/bnx2x_link.c
··· 21 21 #include <linux/delay.h> 22 22 #include <linux/ethtool.h> 23 23 #include <linux/mutex.h> 24 - #include <linux/version.h> 25 24 26 25 #include "bnx2x_reg.h" 27 26 #include "bnx2x_fw_defs.h"
-1
drivers/net/bnx2x_main.c
··· 44 44 #include <net/ip.h> 45 45 #include <net/tcp.h> 46 46 #include <net/checksum.h> 47 - #include <linux/version.h> 48 47 #include <net/ip6_checksum.h> 49 48 #include <linux/workqueue.h> 50 49 #include <linux/crc32.h>
-1
drivers/net/cpmac.c
··· 26 26 #include <linux/errno.h> 27 27 #include <linux/types.h> 28 28 #include <linux/delay.h> 29 - #include <linux/version.h> 30 29 31 30 #include <linux/netdevice.h> 32 31 #include <linux/etherdevice.h>
+1 -1
drivers/net/e1000e/defines.h
··· 389 389 390 390 /* Interrupt Cause Set */ 391 391 #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 392 - #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 392 + #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 393 393 #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 394 394 395 395 /* Transmit Descriptor Control */
+1
drivers/net/e1000e/e1000.h
··· 326 326 #define FLAG_RX_CSUM_ENABLED (1 << 28) 327 327 #define FLAG_TSO_FORCE (1 << 29) 328 328 #define FLAG_RX_RESTART_NOW (1 << 30) 329 + #define FLAG_MSI_TEST_FAILED (1 << 31) 329 330 330 331 #define E1000_RX_DESC_PS(R, i) \ 331 332 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+1 -1
drivers/net/e1000e/ethtool.c
··· 177 177 u32 status; 178 178 179 179 status = er32(STATUS); 180 - return (status & E1000_STATUS_LU); 180 + return (status & E1000_STATUS_LU) ? 1 : 0; 181 181 } 182 182 183 183 static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+170 -15
drivers/net/e1000e/netdev.c
··· 510 510 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 511 511 if (new_skb) { 512 512 skb_reserve(new_skb, NET_IP_ALIGN); 513 - memcpy(new_skb->data - NET_IP_ALIGN, 514 - skb->data - NET_IP_ALIGN, 515 - length + NET_IP_ALIGN); 513 + skb_copy_to_linear_data_offset(new_skb, 514 + -NET_IP_ALIGN, 515 + (skb->data - 516 + NET_IP_ALIGN), 517 + (length + 518 + NET_IP_ALIGN)); 516 519 /* save the skb in buffer_info as good */ 517 520 buffer_info->skb = skb; 518 521 skb = new_skb; ··· 1236 1233 return IRQ_HANDLED; 1237 1234 } 1238 1235 1236 + /** 1237 + * e1000_request_irq - initialize interrupts 1238 + * 1239 + * Attempts to configure interrupts using the best available 1240 + * capabilities of the hardware and kernel. 1241 + **/ 1239 1242 static int e1000_request_irq(struct e1000_adapter *adapter) 1240 1243 { 1241 1244 struct net_device *netdev = adapter->netdev; 1242 - irq_handler_t handler = e1000_intr; 1243 1245 int irq_flags = IRQF_SHARED; 1244 1246 int err; 1245 1247 1246 - if (!pci_enable_msi(adapter->pdev)) { 1247 - adapter->flags |= FLAG_MSI_ENABLED; 1248 - handler = e1000_intr_msi; 1249 - irq_flags = 0; 1248 + if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) { 1249 + err = pci_enable_msi(adapter->pdev); 1250 + if (!err) { 1251 + adapter->flags |= FLAG_MSI_ENABLED; 1252 + irq_flags = 0; 1253 + } 1250 1254 } 1251 1255 1252 - err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 1253 - netdev); 1256 + err = request_irq(adapter->pdev->irq, 1257 + ((adapter->flags & FLAG_MSI_ENABLED) ? 1258 + &e1000_intr_msi : &e1000_intr), 1259 + irq_flags, netdev->name, netdev); 1254 1260 if (err) { 1255 - e_err("Unable to allocate %s interrupt (return: %d)\n", 1256 - adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err); 1257 - if (adapter->flags & FLAG_MSI_ENABLED) 1261 + if (adapter->flags & FLAG_MSI_ENABLED) { 1258 1262 pci_disable_msi(adapter->pdev); 1263 + adapter->flags &= ~FLAG_MSI_ENABLED; 1264 + } 1265 + e_err("Unable to allocate interrupt, Error: %d\n", err); 1259 1266 } 1260 1267 1261 1268 return err; ··· 2605 2592 } 2606 2593 2607 2594 /** 2595 + * e1000_intr_msi_test - Interrupt Handler 2596 + * @irq: interrupt number 2597 + * @data: pointer to a network interface device structure 2598 + **/ 2599 + static irqreturn_t e1000_intr_msi_test(int irq, void *data) 2600 + { 2601 + struct net_device *netdev = data; 2602 + struct e1000_adapter *adapter = netdev_priv(netdev); 2603 + struct e1000_hw *hw = &adapter->hw; 2604 + u32 icr = er32(ICR); 2605 + 2606 + e_dbg("%s: icr is %08X\n", netdev->name, icr); 2607 + if (icr & E1000_ICR_RXSEQ) { 2608 + adapter->flags &= ~FLAG_MSI_TEST_FAILED; 2609 + wmb(); 2610 + } 2611 + 2612 + return IRQ_HANDLED; 2613 + } 2614 + 2615 + /** 2616 + * e1000_test_msi_interrupt - Returns 0 for successful test 2617 + * @adapter: board private struct 2618 + * 2619 + * code flow taken from tg3.c 2620 + **/ 2621 + static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) 2622 + { 2623 + struct net_device *netdev = adapter->netdev; 2624 + struct e1000_hw *hw = &adapter->hw; 2625 + int err; 2626 + 2627 + /* poll_enable hasn't been called yet, so don't need disable */ 2628 + /* clear any pending events */ 2629 + er32(ICR); 2630 + 2631 + /* free the real vector and request a test handler */ 2632 + e1000_free_irq(adapter); 2633 + 2634 + /* Assume that the test fails, if it succeeds then the test 2635 + * MSI irq handler will unset this flag */ 2636 + adapter->flags |= FLAG_MSI_TEST_FAILED; 2637 + 2638 + err = pci_enable_msi(adapter->pdev); 2639 + if (err) 
2640 + goto msi_test_failed; 2641 + 2642 + err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, 2643 + netdev->name, netdev); 2644 + if (err) { 2645 + pci_disable_msi(adapter->pdev); 2646 + goto msi_test_failed; 2647 + } 2648 + 2649 + wmb(); 2650 + 2651 + e1000_irq_enable(adapter); 2652 + 2653 + /* fire an unusual interrupt on the test handler */ 2654 + ew32(ICS, E1000_ICS_RXSEQ); 2655 + e1e_flush(); 2656 + msleep(50); 2657 + 2658 + e1000_irq_disable(adapter); 2659 + 2660 + rmb(); 2661 + 2662 + if (adapter->flags & FLAG_MSI_TEST_FAILED) { 2663 + err = -EIO; 2664 + e_info("MSI interrupt test failed!\n"); 2665 + } 2666 + 2667 + free_irq(adapter->pdev->irq, netdev); 2668 + pci_disable_msi(adapter->pdev); 2669 + 2670 + if (err == -EIO) 2671 + goto msi_test_failed; 2672 + 2673 + /* okay so the test worked, restore settings */ 2674 + e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); 2675 + msi_test_failed: 2676 + /* restore the original vector, even if it failed */ 2677 + e1000_request_irq(adapter); 2678 + return err; 2679 + } 2680 + 2681 + /** 2682 + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored 2683 + * @adapter: board private struct 2684 + * 2685 + * code flow taken from tg3.c, called with e1000 interrupts disabled. 2686 + **/ 2687 + static int e1000_test_msi(struct e1000_adapter *adapter) 2688 + { 2689 + int err; 2690 + u16 pci_cmd; 2691 + 2692 + if (!(adapter->flags & FLAG_MSI_ENABLED)) 2693 + return 0; 2694 + 2695 + /* disable SERR in case the MSI write causes a master abort */ 2696 + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 2697 + pci_write_config_word(adapter->pdev, PCI_COMMAND, 2698 + pci_cmd & ~PCI_COMMAND_SERR); 2699 + 2700 + err = e1000_test_msi_interrupt(adapter); 2701 + 2702 + /* restore previous setting of command word */ 2703 + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 2704 + 2705 + /* success ! 
*/ 2706 + if (!err) 2707 + return 0; 2708 + 2709 + /* EIO means MSI test failed */ 2710 + if (err != -EIO) 2711 + return err; 2712 + 2713 + /* back to INTx mode */ 2714 + e_warn("MSI interrupt test failed, using legacy interrupt.\n"); 2715 + 2716 + e1000_free_irq(adapter); 2717 + 2718 + err = e1000_request_irq(adapter); 2719 + 2720 + return err; 2721 + } 2722 + 2723 + /** 2608 2724 * e1000_open - Called when a network interface is made active 2609 2725 * @netdev: network interface device structure 2610 2726 * ··· 2790 2648 err = e1000_request_irq(adapter); 2791 2649 if (err) 2792 2650 goto err_req_irq; 2651 + 2652 + /* 2653 + * Work around PCIe errata with MSI interrupts causing some chipsets to 2654 + * ignore e1000e MSI messages, which means we need to test our MSI 2655 + * interrupt now 2656 + */ 2657 + { 2658 + err = e1000_test_msi(adapter); 2659 + if (err) { 2660 + e_err("Interrupt allocation failed\n"); 2661 + goto err_req_irq; 2662 + } 2663 + } 2793 2664 2794 2665 /* From here on the code is the same as e1000e_up() */ 2795 2666 clear_bit(__E1000_DOWN, &adapter->state); ··· 3210 3055 case SPEED_10: 3211 3056 txb2b = 0; 3212 3057 netdev->tx_queue_len = 10; 3213 - adapter->tx_timeout_factor = 14; 3058 + adapter->tx_timeout_factor = 16; 3214 3059 break; 3215 3060 case SPEED_100: 3216 3061 txb2b = 0; ··· 3876 3721 struct e1000_adapter *adapter = netdev_priv(netdev); 3877 3722 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3878 3723 3879 - if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3724 + if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 3880 3725 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3881 3726 e_err("Invalid MTU setting\n"); 3882 3727 return -EINVAL;
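
The MSI test added above follows a simple pattern: assume failure up front (FLAG_MSI_TEST_FAILED), fire a harmless interrupt at a dedicated test handler, and let the handler clear the flag, with wmb()/rmb() bracketing the shared flag. The sketch below imitates only that flow in user space, with a POSIX signal standing in for the test interrupt; the names and the SIGUSR1 substitution are illustrative, not anything the driver does.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t test_failed = 1;	/* assume failure up front */

/* Plays the role of e1000_intr_msi_test(): its only job is to prove that
 * delivery works by clearing the failure flag. */
static void test_handler(int signo)
{
	(void)signo;
	test_failed = 0;
}

static int delivery_self_test(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = test_handler;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) != 0)
		return -1;

	test_failed = 1;		/* like setting FLAG_MSI_TEST_FAILED */
	raise(SIGUSR1);			/* "fire an unusual interrupt on the test handler" */
	usleep(50 * 1000);		/* give delivery time, as the driver's msleep(50) does */

	if (test_failed) {
		fprintf(stderr, "delivery test failed, fall back to the legacy path\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("self test: %s\n", delivery_self_test() == 0 ? "ok" : "failed");
	return 0;
}

In the real fix the memory barriers matter because the flag is shared with interrupt context; on failure the driver drops back to legacy INTx rather than refusing to load.
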
+19 -6
drivers/net/e1000e/param.c
··· 324 324 adapter->itr = 20000; 325 325 break; 326 326 default: 327 - e1000_validate_option(&adapter->itr, &opt, 328 - adapter); 329 327 /* 330 - * save the setting, because the dynamic bits 331 - * change itr. clear the lower two bits 332 - * because they are used as control 328 + * Save the setting, because the dynamic bits 329 + * change itr. 333 330 */ 334 - adapter->itr_setting = adapter->itr & ~3; 331 + if (e1000_validate_option(&adapter->itr, &opt, 332 + adapter) && 333 + (adapter->itr == 3)) { 334 + /* 335 + * In case of invalid user value, 336 + * default to conservative mode. 337 + */ 338 + adapter->itr_setting = adapter->itr; 339 + adapter->itr = 20000; 340 + } else { 341 + /* 342 + * Clear the lower two bits because 343 + * they are used as control. 344 + */ 345 + adapter->itr_setting = 346 + adapter->itr & ~3; 347 + } 335 348 break; 336 349 } 337 350 } else {
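
The param.c hunk changes how a plain-number InterruptThrottleRate is folded into itr_setting/itr: a value that fails validation now falls back to conservative mode (setting 3, 20000 ints/s) instead of being used raw, while a valid rate still has its two low control bits masked off before being remembered. (In the driver, the validator itself writes the default of 3 on failure, which is what the extra adapter->itr == 3 check detects.) A tiny standalone version of just that branch; the range check stands in for e1000_validate_option() and is an assumption, not the driver's validation:

#include <stdbool.h>
#include <stdio.h>

struct itr_cfg {
	unsigned int itr_setting;	/* what the driver remembers across resets */
	unsigned int itr;		/* the rate programmed initially */
};

/* Assumed stand-in for e1000_validate_option(): accept a sane rate range. */
static bool rate_ok(unsigned int v)
{
	return v >= 100 && v <= 100000;
}

/* Mirrors the "default:" branch of the hunk for a plain numeric option. */
static struct itr_cfg map_itr_rate(unsigned int opt)
{
	struct itr_cfg c;

	if (!rate_ok(opt)) {
		/* invalid user value: default to conservative mode */
		c.itr_setting = 3;
		c.itr = 20000;
	} else {
		/* keep the rate, clear the low two bits used as control flags */
		c.itr_setting = opt & ~3u;
		c.itr = opt;
	}
	return c;
}

int main(void)
{
	unsigned int samples[] = { 8000, 8003, 7 };

	for (int i = 0; i < 3; i++) {
		struct itr_cfg c = map_itr_rate(samples[i]);
		printf("option %-5u -> itr_setting %-5u itr %u\n",
		       samples[i], c.itr_setting, c.itr);
	}
	return 0;
}
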
+2 -4
drivers/net/gianfar.c
··· 134 134 static void gfar_vlan_rx_register(struct net_device *netdev, 135 135 struct vlan_group *grp); 136 136 void gfar_halt(struct net_device *dev); 137 - #ifdef CONFIG_PM 138 137 static void gfar_halt_nodisable(struct net_device *dev); 139 - #endif 140 138 void gfar_start(struct net_device *dev); 141 139 static void gfar_clear_exact_match(struct net_device *dev); 142 140 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); ··· 629 631 } 630 632 631 633 632 - #ifdef CONFIG_PM 633 634 /* Halt the receive and transmit queues */ 634 635 static void gfar_halt_nodisable(struct net_device *dev) 635 636 { ··· 654 657 cpu_relax(); 655 658 } 656 659 } 657 - #endif 658 660 659 661 /* Halt the receive and transmit queues */ 660 662 void gfar_halt(struct net_device *dev) ··· 661 665 struct gfar_private *priv = netdev_priv(dev); 662 666 struct gfar __iomem *regs = priv->regs; 663 667 u32 tempval; 668 + 669 + gfar_halt_nodisable(dev); 664 670 665 671 /* Disable Rx and Tx */ 666 672 tempval = gfar_read(&regs->maccfg1);
-1
drivers/net/gianfar_sysfs.c
··· 33 33 34 34 #include <asm/uaccess.h> 35 35 #include <linux/module.h> 36 - #include <linux/version.h> 37 36 38 37 #include "gianfar.h" 39 38
-2
drivers/net/ipg.h
··· 7 7 #ifndef __LINUX_IPG_H 8 8 #define __LINUX_IPG_H 9 9 10 - #include <linux/version.h> 11 10 #include <linux/module.h> 12 11 13 12 #include <linux/kernel.h> ··· 20 21 #include <linux/etherdevice.h> 21 22 #include <linux/init.h> 22 23 #include <linux/skbuff.h> 23 - #include <linux/version.h> 24 24 #include <asm/bitops.h> 25 25 26 26 /*
+1
drivers/net/ixgbe/ixgbe_82598.c
··· 190 190 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 191 191 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 192 192 case IXGBE_DEV_ID_82598EB_CX4: 193 + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 193 194 media_type = ixgbe_media_type_fiber; 194 195 break; 195 196 case IXGBE_DEV_ID_82598AT_DUAL_PORT:
+3 -1
drivers/net/ixgbe/ixgbe_main.c
··· 48 48 static const char ixgbe_driver_string[] = 49 49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 50 50 51 - #define DRV_VERSION "1.3.18-k2" 51 + #define DRV_VERSION "1.3.18-k4" 52 52 const char ixgbe_driver_version[] = DRV_VERSION; 53 53 static const char ixgbe_copyright[] = 54 54 "Copyright (c) 1999-2007 Intel Corporation."; ··· 71 71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 72 72 board_82598 }, 73 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 74 + board_82598 }, 75 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 74 76 board_82598 }, 75 77 76 78 /* required last entry */
+1
drivers/net/ixgbe/ixgbe_type.h
··· 39 39 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 40 40 #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 41 41 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD 42 + #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC 42 43 43 44 /* General Registers */ 44 45 #define IXGBE_CTRL 0x00000
-67
drivers/net/loopback.c
··· 64 64 unsigned long bytes; 65 65 }; 66 66 67 - /* KISS: just allocate small chunks and copy bits. 68 - * 69 - * So, in fact, this is documentation, explaining what we expect 70 - * of largesending device modulo TCP checksum, which is ignored for loopback. 71 - */ 72 - 73 - #ifdef LOOPBACK_TSO 74 - static void emulate_large_send_offload(struct sk_buff *skb) 75 - { 76 - struct iphdr *iph = ip_hdr(skb); 77 - struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + 78 - (iph->ihl * 4)); 79 - unsigned int doffset = (iph->ihl + th->doff) * 4; 80 - unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; 81 - unsigned int offset = 0; 82 - u32 seq = ntohl(th->seq); 83 - u16 id = ntohs(iph->id); 84 - 85 - while (offset + doffset < skb->len) { 86 - unsigned int frag_size = min(mtu, skb->len - offset) - doffset; 87 - struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC); 88 - 89 - if (!nskb) 90 - break; 91 - skb_reserve(nskb, 32); 92 - skb_set_mac_header(nskb, -ETH_HLEN); 93 - skb_reset_network_header(nskb); 94 - iph = ip_hdr(nskb); 95 - skb_copy_to_linear_data(nskb, skb_network_header(skb), 96 - doffset); 97 - if (skb_copy_bits(skb, 98 - doffset + offset, 99 - nskb->data + doffset, 100 - frag_size)) 101 - BUG(); 102 - skb_put(nskb, doffset + frag_size); 103 - nskb->ip_summed = CHECKSUM_UNNECESSARY; 104 - nskb->dev = skb->dev; 105 - nskb->priority = skb->priority; 106 - nskb->protocol = skb->protocol; 107 - nskb->dst = dst_clone(skb->dst); 108 - memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 109 - nskb->pkt_type = skb->pkt_type; 110 - 111 - th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); 112 - iph->tot_len = htons(frag_size + doffset); 113 - iph->id = htons(id); 114 - iph->check = 0; 115 - iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl); 116 - th->seq = htonl(seq); 117 - if (offset + doffset + frag_size < skb->len) 118 - th->fin = th->psh = 0; 119 - netif_rx(nskb); 120 - offset += frag_size; 121 - seq += frag_size; 122 - id++; 123 - } 124 - 125 - dev_kfree_skb(skb); 126 - } 127 - #endif /* LOOPBACK_TSO */ 128 - 129 67 /* 130 68 * The higher levels take care of making this non-reentrant (it's 131 69 * called with bh's disabled). ··· 75 137 skb_orphan(skb); 76 138 77 139 skb->protocol = eth_type_trans(skb,dev); 78 - #ifndef LOOPBACK_MUST_CHECKSUM 79 - skb->ip_summed = CHECKSUM_UNNECESSARY; 80 - #endif 81 140 82 141 #ifdef LOOPBACK_TSO 83 142 if (skb_is_gso(skb)) { ··· 169 234 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 170 235 dev->flags = IFF_LOOPBACK; 171 236 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 172 - #ifdef LOOPBACK_TSO 173 237 | NETIF_F_TSO 174 - #endif 175 238 | NETIF_F_NO_CSUM 176 239 | NETIF_F_HIGHDMA 177 240 | NETIF_F_LLTX
+5 -2
drivers/net/myri10ge/myri10ge.c
··· 56 56 #include <linux/ethtool.h> 57 57 #include <linux/firmware.h> 58 58 #include <linux/delay.h> 59 - #include <linux/version.h> 60 59 #include <linux/timer.h> 61 60 #include <linux/vmalloc.h> 62 61 #include <linux/crc32.h> ··· 3547 3548 3548 3549 /* try to load the slice aware rss firmware */ 3549 3550 old_fw = mgp->fw_name; 3550 - if (old_fw == myri10ge_fw_aligned) 3551 + if (myri10ge_fw_name != NULL) { 3552 + dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n", 3553 + myri10ge_fw_name); 3554 + mgp->fw_name = myri10ge_fw_name; 3555 + } else if (old_fw == myri10ge_fw_aligned) 3551 3556 mgp->fw_name = myri10ge_fw_rss_aligned; 3552 3557 else 3553 3558 mgp->fw_name = myri10ge_fw_rss_unaligned;
+2 -2
drivers/net/ne.c
··· 118 118 {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ 119 119 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ 120 120 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ 121 - #if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) 121 + #ifdef CONFIG_MACH_TX49XX 122 122 {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ 123 123 #endif 124 124 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ ··· 142 142 #if defined(CONFIG_PLAT_MAPPI) 143 143 # define DCR_VAL 0x4b 144 144 #elif defined(CONFIG_PLAT_OAKS32R) || \ 145 - defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) 145 + defined(CONFIG_MACH_TX49XX) 146 146 # define DCR_VAL 0x48 /* 8-bit mode */ 147 147 #else 148 148 # define DCR_VAL 0x49
+4 -4
drivers/net/netxen/netxen_nic.h
··· 45 45 #include <linux/in.h> 46 46 #include <linux/tcp.h> 47 47 #include <linux/skbuff.h> 48 - #include <linux/version.h> 49 48 50 49 #include <linux/ethtool.h> 51 50 #include <linux/mii.h> ··· 65 66 66 67 #define _NETXEN_NIC_LINUX_MAJOR 4 67 68 #define _NETXEN_NIC_LINUX_MINOR 0 68 - #define _NETXEN_NIC_LINUX_SUBVERSION 0 69 - #define NETXEN_NIC_LINUX_VERSIONID "4.0.0" 69 + #define _NETXEN_NIC_LINUX_SUBVERSION 11 70 + #define NETXEN_NIC_LINUX_VERSIONID "4.0.11" 70 71 71 72 #define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) 72 73 ··· 1614 1615 1615 1616 1616 1617 int netxen_is_flash_supported(struct netxen_adapter *adapter); 1617 - int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]); 1618 + int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1619 + int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1618 1620 extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1619 1621 extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1620 1622 int *valp);
-1
drivers/net/netxen/netxen_nic_ethtool.c
··· 38 38 #include <asm/io.h> 39 39 #include <linux/netdevice.h> 40 40 #include <linux/ethtool.h> 41 - #include <linux/version.h> 42 41 43 42 #include "netxen_nic.h" 44 43 #include "netxen_nic_hw.h"
-2
drivers/net/netxen/netxen_nic_hdr.h
··· 32 32 33 33 #include <linux/module.h> 34 34 #include <linux/kernel.h> 35 - #include <linux/version.h> 36 - 37 35 #include <linux/spinlock.h> 38 36 #include <asm/irq.h> 39 37 #include <linux/init.h>
+42 -17
drivers/net/netxen/netxen_nic_hw.c
··· 733 733 return 0; 734 734 } 735 735 736 - int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]) 736 + int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 737 737 { 738 - __le32 *pmac = (__le32 *) & mac[0]; 738 + __le32 *pmac = (__le32 *) mac; 739 + u32 offset; 739 740 740 - if (netxen_get_flash_block(adapter, 741 - NETXEN_USER_START + 742 - offsetof(struct netxen_new_user_info, 743 - mac_addr), 744 - FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) { 741 + offset = NETXEN_USER_START + 742 + offsetof(struct netxen_new_user_info, mac_addr) + 743 + adapter->portnum * sizeof(u64); 744 + 745 + if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) 745 746 return -1; 746 - } 747 + 747 748 if (*mac == cpu_to_le64(~0ULL)) { 749 + 750 + offset = NETXEN_USER_START_OLD + 751 + offsetof(struct netxen_user_old_info, mac_addr) + 752 + adapter->portnum * sizeof(u64); 753 + 748 754 if (netxen_get_flash_block(adapter, 749 - NETXEN_USER_START_OLD + 750 - offsetof(struct netxen_user_old_info, 751 - mac_addr), 752 - FLASH_NUM_PORTS * sizeof(u64), 753 - pmac) == -1) 755 + offset, sizeof(u64), pmac) == -1) 754 756 return -1; 757 + 755 758 if (*mac == cpu_to_le64(~0ULL)) 756 759 return -1; 757 760 } 761 + return 0; 762 + } 763 + 764 + int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 765 + { 766 + uint32_t crbaddr, mac_hi, mac_lo; 767 + int pci_func = adapter->ahw.pci_func; 768 + 769 + crbaddr = CRB_MAC_BLOCK_START + 770 + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); 771 + 772 + adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4); 773 + adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4); 774 + 775 + mac_hi = cpu_to_le32(mac_hi); 776 + mac_lo = cpu_to_le32(mac_lo); 777 + 778 + if (pci_func & 1) 779 + *mac = ((mac_lo >> 16) | ((u64)mac_hi << 16)); 780 + else 781 + *mac = ((mac_lo) | ((u64)mac_hi << 32)); 782 + 758 783 return 0; 759 784 } 760 785 ··· 2208 2183 if (adapter->portnum == 0) { 2209 2184 get_brd_name_by_type(board_info->board_type, brd_name); 2210 2185 2211 - printk("NetXen %s Board S/N %s Chip id 0x%x\n", 2212 - brd_name, serial_num, board_info->chip_id); 2213 - printk("NetXen Firmware version %d.%d.%d\n", fw_major, 2214 - fw_minor, fw_build); 2186 + printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n", 2187 + brd_name, serial_num, adapter->ahw.revision_id); 2188 + printk(KERN_INFO "NetXen Firmware version %d.%d.%d\n", 2189 + fw_major, fw_minor, fw_build); 2215 2190 } 2216 2191 2217 2192 if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) <
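
netxen_p3_get_mac_addr() above locates the MAC for a PCI function as two 32-bit words in a per-function-pair block and stitches them together differently for odd and even functions. The address and shift arithmetic is easier to check in isolation; this sketch reproduces only that math with made-up register contents (the CRB accesses and the byte-order conversion are left out):

#include <stdint.h>
#include <stdio.h>

#define MAC_BLOCK_START 0x1c0	/* the 0x1c0 comes from the CRB_MAC_BLOCK_START
				 * definition added a few hunks below */

/* Address of the low word for a function: three words per pair of
 * functions, and the odd function's low word sits one word further in. */
static uint32_t mac_lo_offset(int pci_func)
{
	return MAC_BLOCK_START + 4 * ((pci_func / 2) * 3) + 4 * (pci_func & 1);
}

/* Combine the two words the way the driver does: an odd function's MAC
 * starts 16 bits into its low word. */
static uint64_t combine_mac(uint32_t mac_lo, uint32_t mac_hi, int pci_func)
{
	if (pci_func & 1)
		return (mac_lo >> 16) | ((uint64_t)mac_hi << 16);
	return mac_lo | ((uint64_t)mac_hi << 32);
}

int main(void)
{
	uint32_t lo = 0x33445566, hi = 0x00001122;	/* hypothetical contents */

	for (int fn = 0; fn < 4; fn++)
		printf("func %d: lo word at 0x%03x -> mac 0x%012llx\n",
		       fn, mac_lo_offset(fn),
		       (unsigned long long)combine_mac(lo, hi, fn));
	return 0;
}
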
+15 -13
drivers/net/netxen/netxen_nic_init.c
··· 1079 1079 1080 1080 void netxen_free_adapter_offload(struct netxen_adapter *adapter) 1081 1081 { 1082 - int i; 1082 + int i = 100; 1083 1083 1084 - if (adapter->dummy_dma.addr) { 1085 - i = 100; 1084 + if (!adapter->dummy_dma.addr) 1085 + return; 1086 + 1087 + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1086 1088 do { 1087 1089 if (dma_watchdog_shutdown_request(adapter) == 1) 1088 1090 break; ··· 1092 1090 if (dma_watchdog_shutdown_poll_result(adapter) == 1) 1093 1091 break; 1094 1092 } while (--i); 1093 + } 1095 1094 1096 - if (i) { 1097 - pci_free_consistent(adapter->pdev, 1098 - NETXEN_HOST_DUMMY_DMA_SIZE, 1099 - adapter->dummy_dma.addr, 1100 - adapter->dummy_dma.phys_addr); 1101 - adapter->dummy_dma.addr = NULL; 1102 - } else { 1103 - printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", 1104 - adapter->netdev->name); 1105 - } 1095 + if (i) { 1096 + pci_free_consistent(adapter->pdev, 1097 + NETXEN_HOST_DUMMY_DMA_SIZE, 1098 + adapter->dummy_dma.addr, 1099 + adapter->dummy_dma.phys_addr); 1100 + adapter->dummy_dma.addr = NULL; 1101 + } else { 1102 + printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", 1103 + adapter->netdev->name); 1106 1104 } 1107 1105 } 1108 1106
+96 -114
drivers/net/netxen/netxen_nic_main.c
··· 149 149 150 150 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; 151 151 152 - static void netxen_nic_disable_int(struct netxen_adapter *adapter) 152 + static inline void netxen_nic_disable_int(struct netxen_adapter *adapter) 153 153 { 154 - u32 mask = 0x7ff; 155 - int retries = 32; 156 - int pci_fn = adapter->ahw.pci_func; 157 - 158 - if (adapter->msi_mode != MSI_MODE_MULTIFUNC) 159 - adapter->pci_write_normalize(adapter, 160 - adapter->crb_intr_mask, 0); 161 - 162 - if (adapter->intr_scheme != -1 && 163 - adapter->intr_scheme != INTR_SCHEME_PERPORT) 164 - adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); 165 - 166 - if (!NETXEN_IS_MSI_FAMILY(adapter)) { 167 - do { 168 - adapter->pci_write_immediate(adapter, 169 - adapter->legacy_intr.tgt_status_reg, 170 - 0xffffffff); 171 - mask = adapter->pci_read_immediate(adapter, 172 - ISR_INT_VECTOR); 173 - if (!(mask & 0x80)) 174 - break; 175 - udelay(10); 176 - } while (--retries); 177 - 178 - if (!retries) { 179 - printk(KERN_NOTICE "%s: Failed to disable interrupt\n", 180 - netxen_nic_driver_name); 181 - } 182 - } else { 183 - if (adapter->msi_mode == MSI_MODE_MULTIFUNC) { 184 - adapter->pci_write_immediate(adapter, 185 - msi_tgt_status[pci_fn], 0xffffffff); 186 - } 187 - } 154 + adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0); 188 155 } 189 156 190 - static void netxen_nic_enable_int(struct netxen_adapter *adapter) 157 + static inline void netxen_nic_enable_int(struct netxen_adapter *adapter) 191 158 { 192 - u32 mask; 193 - 194 - if (adapter->intr_scheme != -1 && 195 - adapter->intr_scheme != INTR_SCHEME_PERPORT) { 196 - switch (adapter->ahw.board_type) { 197 - case NETXEN_NIC_GBE: 198 - mask = 0x77b; 199 - break; 200 - case NETXEN_NIC_XGBE: 201 - mask = 0x77f; 202 - break; 203 - default: 204 - mask = 0x7ff; 205 - break; 206 - } 207 - 208 - adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); 209 - } 210 - 211 159 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); 212 160 213 - if (!NETXEN_IS_MSI_FAMILY(adapter)) { 214 - mask = 0xbff; 215 - if (adapter->intr_scheme == INTR_SCHEME_PERPORT) 216 - adapter->pci_write_immediate(adapter, 217 - adapter->legacy_intr.tgt_mask_reg, mask); 218 - else 219 - adapter->pci_write_normalize(adapter, 220 - CRB_INT_VECTOR, 0); 221 - } 161 + if (!NETXEN_IS_MSI_FAMILY(adapter)) 162 + adapter->pci_write_immediate(adapter, 163 + adapter->legacy_intr.tgt_mask_reg, 0xfbff); 222 164 } 223 165 224 166 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) ··· 443 501 adapter->msix_entries[i].entry = i; 444 502 } 445 503 504 + static int 505 + netxen_read_mac_addr(struct netxen_adapter *adapter) 506 + { 507 + int i; 508 + unsigned char *p; 509 + __le64 mac_addr; 510 + DECLARE_MAC_BUF(mac); 511 + struct net_device *netdev = adapter->netdev; 512 + struct pci_dev *pdev = adapter->pdev; 513 + 514 + if (netxen_is_flash_supported(adapter) != 0) 515 + return -EIO; 516 + 517 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 518 + if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) 519 + return -EIO; 520 + } else { 521 + if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) 522 + return -EIO; 523 + } 524 + 525 + p = (unsigned char *)&mac_addr; 526 + for (i = 0; i < 6; i++) 527 + netdev->dev_addr[i] = *(p + 5 - i); 528 + 529 + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 530 + 531 + /* set station address */ 532 + 533 + if (!is_valid_ether_addr(netdev->perm_addr)) { 534 + dev_warn(&pdev->dev, "Bad MAC 
address %s.\n", 535 + print_mac(mac, netdev->dev_addr)); 536 + } else 537 + adapter->macaddr_set(adapter, netdev->dev_addr); 538 + 539 + return 0; 540 + } 541 + 446 542 /* 447 543 * netxen_nic_probe() 448 544 * ··· 509 529 unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0; 510 530 int i = 0, err; 511 531 int first_driver, first_boot; 512 - __le64 mac_addr[FLASH_NUM_PORTS + 1]; 513 532 u32 val; 514 533 int pci_func_id = PCI_FUNC(pdev->devfn); 515 - DECLARE_MAC_BUF(mac); 516 534 struct netxen_legacy_intr_set *legacy_intrp; 517 535 uint8_t revision_id; 518 536 ··· 520 542 if (pdev->class != 0x020000) { 521 543 printk(KERN_DEBUG "NetXen function %d, class %x will not " 522 544 "be enabled.\n",pci_func_id, pdev->class); 545 + return -ENODEV; 546 + } 547 + 548 + if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { 549 + printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x" 550 + "will not be enabled.\n", 551 + NX_P3_A0, NX_P3_B1); 523 552 return -ENODEV; 524 553 } 525 554 ··· 883 898 goto err_out_disable_msi; 884 899 885 900 init_timer(&adapter->watchdog_timer); 886 - adapter->ahw.linkup = 0; 887 901 adapter->watchdog_timer.function = &netxen_watchdog; 888 902 adapter->watchdog_timer.data = (unsigned long)adapter; 889 903 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); 890 904 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); 891 905 892 - if (netxen_is_flash_supported(adapter) == 0 && 893 - netxen_get_flash_mac_addr(adapter, mac_addr) == 0) { 894 - unsigned char *p; 895 - 896 - p = (unsigned char *)&mac_addr[adapter->portnum]; 897 - netdev->dev_addr[0] = *(p + 5); 898 - netdev->dev_addr[1] = *(p + 4); 899 - netdev->dev_addr[2] = *(p + 3); 900 - netdev->dev_addr[3] = *(p + 2); 901 - netdev->dev_addr[4] = *(p + 1); 902 - netdev->dev_addr[5] = *(p + 0); 903 - 904 - memcpy(netdev->perm_addr, netdev->dev_addr, 905 - netdev->addr_len); 906 - if (!is_valid_ether_addr(netdev->perm_addr)) { 907 - printk(KERN_ERR "%s: Bad MAC address %s.\n", 908 - netxen_nic_driver_name, 909 - print_mac(mac, netdev->dev_addr)); 910 - } else { 911 - adapter->macaddr_set(adapter, netdev->dev_addr); 912 - } 913 - } 906 + err = netxen_read_mac_addr(adapter); 907 + if (err) 908 + dev_warn(&pdev->dev, "failed to read mac addr\n"); 914 909 915 910 netif_carrier_off(netdev); 916 911 netif_stop_queue(netdev); ··· 965 1000 966 1001 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { 967 1002 netxen_free_hw_resources(adapter); 1003 + netxen_release_rx_buffers(adapter); 968 1004 netxen_free_sw_resources(adapter); 969 1005 } 970 1006 ··· 1035 1069 goto err_out_free_sw; 1036 1070 } 1037 1071 1072 + if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) || 1073 + (adapter->intr_scheme != INTR_SCHEME_PERPORT)) { 1074 + printk(KERN_ERR "%s: Firmware interrupt scheme is " 1075 + "incompatible with driver\n", 1076 + netdev->name); 1077 + adapter->driver_mismatch = 1; 1078 + goto err_out_free_hw; 1079 + } 1080 + 1038 1081 if (adapter->fw_major < 4) { 1039 1082 adapter->crb_addr_cmd_producer = 1040 1083 crb_cmd_producer[adapter->portnum]; ··· 1069 1094 flags, netdev->name, adapter); 1070 1095 if (err) { 1071 1096 printk(KERN_ERR "request_irq failed with: %d\n", err); 1072 - goto err_out_free_hw; 1097 + goto err_out_free_rxbuf; 1073 1098 } 1074 1099 1075 1100 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; ··· 1091 1116 if (adapter->set_mtu) 1092 1117 adapter->set_mtu(adapter, netdev->mtu); 1093 1118 1119 + adapter->ahw.linkup = 0; 1094 1120 mod_timer(&adapter->watchdog_timer, jiffies); 1095 1121 1096 1122 
napi_enable(&adapter->napi); ··· 1103 1127 1104 1128 err_out_free_irq: 1105 1129 free_irq(adapter->irq, adapter); 1130 + err_out_free_rxbuf: 1131 + netxen_release_rx_buffers(adapter); 1106 1132 err_out_free_hw: 1107 1133 netxen_free_hw_resources(adapter); 1108 1134 err_out_free_sw: ··· 1130 1152 1131 1153 netxen_release_tx_buffers(adapter); 1132 1154 1133 - if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { 1134 - FLUSH_SCHEDULED_WORK(); 1135 - del_timer_sync(&adapter->watchdog_timer); 1136 - } 1155 + FLUSH_SCHEDULED_WORK(); 1156 + del_timer_sync(&adapter->watchdog_timer); 1137 1157 1138 1158 return 0; 1139 1159 } ··· 1434 1458 1435 1459 netxen_nic_handle_phy_intr(adapter); 1436 1460 1437 - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 1461 + if (netif_running(adapter->netdev)) 1462 + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 1438 1463 } 1439 1464 1440 1465 static void netxen_tx_timeout(struct net_device *netdev) ··· 1495 1518 return stats; 1496 1519 } 1497 1520 1498 - static inline void 1499 - netxen_handle_int(struct netxen_adapter *adapter) 1500 - { 1501 - netxen_nic_disable_int(adapter); 1502 - napi_schedule(&adapter->napi); 1503 - } 1504 - 1505 1521 static irqreturn_t netxen_intr(int irq, void *data) 1506 1522 { 1507 1523 struct netxen_adapter *adapter = data; 1508 - u32 our_int = 0; 1509 - 1510 1524 u32 status = 0; 1511 1525 1512 1526 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); ··· 1512 1544 if (!ISR_LEGACY_INT_TRIGGERED(status)) 1513 1545 return IRQ_NONE; 1514 1546 1515 - } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1547 + } else { 1548 + unsigned long our_int = 0; 1516 1549 1517 1550 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1551 + 1518 1552 /* not our interrupt */ 1519 - if ((our_int & (0x80 << adapter->portnum)) == 0) 1553 + if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) 1520 1554 return IRQ_NONE; 1521 1555 1522 - if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { 1523 - /* claim interrupt */ 1524 - adapter->pci_write_normalize(adapter, 1525 - CRB_INT_VECTOR, 1526 - our_int & ~((u32)(0x80 << adapter->portnum))); 1527 - } 1556 + /* claim interrupt */ 1557 + adapter->pci_write_normalize(adapter, 1558 + CRB_INT_VECTOR, (our_int & 0xffffffff)); 1528 1559 } 1529 1560 1530 - netxen_handle_int(adapter); 1561 + /* clear interrupt */ 1562 + if (adapter->fw_major < 4) 1563 + netxen_nic_disable_int(adapter); 1564 + 1565 + adapter->pci_write_immediate(adapter, 1566 + adapter->legacy_intr.tgt_status_reg, 1567 + 0xffffffff); 1568 + /* read twice to ensure write is flushed */ 1569 + adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); 1570 + adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); 1571 + 1572 + napi_schedule(&adapter->napi); 1531 1573 1532 1574 return IRQ_HANDLED; 1533 1575 } ··· 1546 1568 { 1547 1569 struct netxen_adapter *adapter = data; 1548 1570 1549 - netxen_handle_int(adapter); 1571 + /* clear interrupt */ 1572 + adapter->pci_write_immediate(adapter, 1573 + msi_tgt_status[adapter->ahw.pci_func], 0xffffffff); 1574 + 1575 + napi_schedule(&adapter->napi); 1550 1576 return IRQ_HANDLED; 1551 1577 } 1552 1578
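
The new netxen_read_mac_addr() takes the little-endian 64-bit MAC returned by the helpers above and fills netdev->dev_addr most-significant byte first by walking the buffer backwards (*(p + 5 - i)). A short sketch of just that byte reversal with an example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A little-endian 64-bit value whose low six bytes hold the MAC,
	 * as the flash/CRB helpers return it; the value is hypothetical. */
	uint64_t mac_addr = 0x0000112233445566ULL;
	uint8_t le_bytes[8];
	uint8_t dev_addr[6];

	/* This is what the __le64 buffer looks like byte by byte. */
	for (int i = 0; i < 8; i++)
		le_bytes[i] = (mac_addr >> (8 * i)) & 0xff;

	/* dev_addr[i] = *(p + 5 - i): reverse the low six bytes. */
	for (int i = 0; i < 6; i++)
		dev_addr[i] = le_bytes[5 - i];

	printf("dev_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}
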
+2
drivers/net/netxen/netxen_nic_phan_reg.h
··· 125 125 #define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4) 126 126 #define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8) 127 127 128 + #define CRB_MAC_BLOCK_START NETXEN_CAM_RAM(0x1c0) 129 + 128 130 /* 129 131 * capabilities register, can be used to selectively enable/disable features 130 132 * for backward compability
-1
drivers/net/ppp_mppe.c
··· 46 46 #include <linux/err.h> 47 47 #include <linux/module.h> 48 48 #include <linux/kernel.h> 49 - #include <linux/version.h> 50 49 #include <linux/init.h> 51 50 #include <linux/types.h> 52 51 #include <linux/slab.h>
-1
drivers/net/pppol2tp.c
··· 61 61 */ 62 62 63 63 #include <linux/module.h> 64 - #include <linux/version.h> 65 64 #include <linux/string.h> 66 65 #include <linux/list.h> 67 66 #include <asm/uaccess.h>
-1
drivers/net/r6040.c
··· 24 24 25 25 #include <linux/kernel.h> 26 26 #include <linux/module.h> 27 - #include <linux/version.h> 28 27 #include <linux/moduleparam.h> 29 28 #include <linux/string.h> 30 29 #include <linux/timer.h>
-1
drivers/net/sh_eth.c
··· 20 20 * the file called "COPYING". 21 21 */ 22 22 23 - #include <linux/version.h> 24 23 #include <linux/init.h> 25 24 #include <linux/dma-mapping.h> 26 25 #include <linux/etherdevice.h>
+6 -2
drivers/net/sky2.c
··· 24 24 25 25 #include <linux/crc32.h> 26 26 #include <linux/kernel.h> 27 - #include <linux/version.h> 28 27 #include <linux/module.h> 29 28 #include <linux/netdevice.h> 30 29 #include <linux/dma-mapping.h> ··· 665 666 666 667 if (hw->chip_id != CHIP_ID_YUKON_EC) { 667 668 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 668 - ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); 669 + /* select page 2 to access MAC control register */ 670 + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); 669 671 672 + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); 670 673 /* enable Power Down */ 671 674 ctrl |= PHY_M_PC_POW_D_ENA; 672 675 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); 676 + 677 + /* set page register back to 0 */ 678 + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); 673 679 } 674 680 675 681 /* set IEEE compatible Power Down Mode (dev. #4.99) */
-1
drivers/net/tehuti.h
··· 27 27 #include <linux/sched.h> 28 28 #include <linux/tty.h> 29 29 #include <linux/if_vlan.h> 30 - #include <linux/version.h> 31 30 #include <linux/interrupt.h> 32 31 #include <linux/vmalloc.h> 33 32 #include <asm/byteorder.h>
+78 -23
drivers/net/tg3.c
··· 66 66 67 67 #define DRV_MODULE_NAME "tg3" 68 68 #define PFX DRV_MODULE_NAME ": " 69 - #define DRV_MODULE_VERSION "3.93" 70 - #define DRV_MODULE_RELDATE "May 22, 2008" 69 + #define DRV_MODULE_VERSION "3.94" 70 + #define DRV_MODULE_RELDATE "August 14, 2008" 71 71 72 72 #define TG3_DEF_MAC_MODE 0 73 73 #define TG3_DEF_RX_MODE 0 ··· 536 536 return 0; 537 537 538 538 switch (locknum) { 539 + case TG3_APE_LOCK_GRC: 539 540 case TG3_APE_LOCK_MEM: 540 541 break; 541 542 default: ··· 574 573 return; 575 574 576 575 switch (locknum) { 576 + case TG3_APE_LOCK_GRC: 577 577 case TG3_APE_LOCK_MEM: 578 578 break; 579 579 default: ··· 1020 1018 } 1021 1019 1022 1020 /* tp->lock is held. */ 1021 + static inline void tg3_generate_fw_event(struct tg3 *tp) 1022 + { 1023 + u32 val; 1024 + 1025 + val = tr32(GRC_RX_CPU_EVENT); 1026 + val |= GRC_RX_CPU_DRIVER_EVENT; 1027 + tw32_f(GRC_RX_CPU_EVENT, val); 1028 + 1029 + tp->last_event_jiffies = jiffies; 1030 + } 1031 + 1032 + #define TG3_FW_EVENT_TIMEOUT_USEC 2500 1033 + 1034 + /* tp->lock is held. */ 1023 1035 static void tg3_wait_for_event_ack(struct tg3 *tp) 1024 1036 { 1025 1037 int i; 1038 + unsigned int delay_cnt; 1039 + long time_remain; 1026 1040 1027 - /* Wait for up to 2.5 milliseconds */ 1028 - for (i = 0; i < 250000; i++) { 1041 + /* If enough time has passed, no wait is necessary. */ 1042 + time_remain = (long)(tp->last_event_jiffies + 1 + 1043 + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - 1044 + (long)jiffies; 1045 + if (time_remain < 0) 1046 + return; 1047 + 1048 + /* Check if we can shorten the wait time. */ 1049 + delay_cnt = jiffies_to_usecs(time_remain); 1050 + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) 1051 + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; 1052 + delay_cnt = (delay_cnt >> 3) + 1; 1053 + 1054 + for (i = 0; i < delay_cnt; i++) { 1029 1055 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) 1030 1056 break; 1031 - udelay(10); 1057 + udelay(8); 1032 1058 } 1033 1059 } 1034 1060 ··· 1105 1075 val = 0; 1106 1076 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); 1107 1077 1108 - val = tr32(GRC_RX_CPU_EVENT); 1109 - val |= GRC_RX_CPU_DRIVER_EVENT; 1110 - tw32_f(GRC_RX_CPU_EVENT, val); 1078 + tg3_generate_fw_event(tp); 1111 1079 } 1112 1080 1113 1081 static void tg3_link_report(struct tg3 *tp) ··· 2151 2123 if (pci_pme_capable(tp->pdev, state) && 2152 2124 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) 2153 2125 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 2126 + 2127 + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2128 + mac_mode |= tp->mac_mode & 2129 + (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 2130 + if (mac_mode & MAC_MODE_APE_TX_EN) 2131 + mac_mode |= MAC_MODE_TDE_ENABLE; 2132 + } 2154 2133 2155 2134 tw32_f(MAC_MODE, mac_mode); 2156 2135 udelay(100); ··· 5528 5493 return; 5529 5494 5530 5495 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 5531 - if (apedata != APE_FW_STATUS_READY) 5496 + if (!(apedata & APE_FW_STATUS_READY)) 5532 5497 return; 5533 5498 5534 5499 /* Wait for up to 1 millisecond for APE to service previous event. */ ··· 5795 5760 5796 5761 tg3_mdio_stop(tp); 5797 5762 5763 + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 5764 + 5798 5765 /* No matching tg3_nvram_unlock() after this because 5799 5766 * chip reset below will undo the nvram lock. 
5800 5767 */ ··· 5945 5908 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 5946 5909 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 5947 5910 tw32_f(MAC_MODE, tp->mac_mode); 5911 + } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 5912 + tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 5913 + if (tp->mac_mode & MAC_MODE_APE_TX_EN) 5914 + tp->mac_mode |= MAC_MODE_TDE_ENABLE; 5915 + tw32_f(MAC_MODE, tp->mac_mode); 5948 5916 } else 5949 5917 tw32_f(MAC_MODE, 0); 5950 5918 udelay(40); 5951 5919 5952 5920 tg3_mdio_start(tp); 5921 + 5922 + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 5953 5923 5954 5924 err = tg3_poll_fw(tp); 5955 5925 if (err) ··· 5979 5935 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 5980 5936 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 5981 5937 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 5938 + tp->last_event_jiffies = jiffies; 5982 5939 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 5983 5940 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 5984 5941 } ··· 5993 5948 { 5994 5949 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 5995 5950 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 5996 - u32 val; 5997 - 5998 5951 /* Wait for RX cpu to ACK the previous event. */ 5999 5952 tg3_wait_for_event_ack(tp); 6000 5953 6001 5954 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); 6002 - val = tr32(GRC_RX_CPU_EVENT); 6003 - val |= GRC_RX_CPU_DRIVER_EVENT; 6004 - tw32(GRC_RX_CPU_EVENT, val); 5955 + 5956 + tg3_generate_fw_event(tp); 6005 5957 6006 5958 /* Wait for RX cpu to ACK this event. */ 6007 5959 tg3_wait_for_event_ack(tp); ··· 7448 7406 udelay(10); 7449 7407 } 7450 7408 7451 - tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 7409 + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 7410 + tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 7411 + else 7412 + tp->mac_mode = 0; 7413 + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 7452 7414 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 7453 7415 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 7454 7416 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && ··· 7886 7840 * resets. 
7887 7841 */ 7888 7842 if (!--tp->asf_counter) { 7889 - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 7890 - u32 val; 7891 - 7843 + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 7844 + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 7892 7845 tg3_wait_for_event_ack(tp); 7893 7846 7894 7847 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, ··· 7895 7850 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 7896 7851 /* 5 seconds timeout */ 7897 7852 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); 7898 - val = tr32(GRC_RX_CPU_EVENT); 7899 - val |= GRC_RX_CPU_DRIVER_EVENT; 7900 - tw32_f(GRC_RX_CPU_EVENT, val); 7853 + 7854 + tg3_generate_fw_event(tp); 7901 7855 } 7902 7856 tp->asf_counter = tp->asf_multiplier; 7903 7857 } ··· 8466 8422 return ret; 8467 8423 } 8468 8424 8425 + static inline u64 get_estat64(tg3_stat64_t *val) 8426 + { 8427 + return ((u64)val->high << 32) | ((u64)val->low); 8428 + } 8429 + 8469 8430 static unsigned long calc_crc_errors(struct tg3 *tp) 8470 8431 { 8471 8432 struct tg3_hw_stats *hw_stats = tp->hw_stats; ··· 8499 8450 8500 8451 #define ESTAT_ADD(member) \ 8501 8452 estats->member = old_estats->member + \ 8502 - get_stat64(&hw_stats->member) 8453 + get_estat64(&hw_stats->member) 8503 8454 8504 8455 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) 8505 8456 { ··· 12465 12416 tp->misc_host_ctrl); 12466 12417 } 12467 12418 12419 + /* Preserve the APE MAC_MODE bits */ 12420 + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 12421 + tp->mac_mode = tr32(MAC_MODE) | 12422 + MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 12423 + else 12424 + tp->mac_mode = TG3_DEF_MAC_MODE; 12425 + 12468 12426 /* these are limited to 10/100 only */ 12469 12427 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 12470 12428 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || ··· 13331 13275 tp->pdev = pdev; 13332 13276 tp->dev = dev; 13333 13277 tp->pm_cap = pm_cap; 13334 - tp->mac_mode = TG3_DEF_MAC_MODE; 13335 13278 tp->rx_mode = TG3_DEF_RX_MODE; 13336 13279 tp->tx_mode = TG3_DEF_TX_MODE; 13337 13280
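
tg3_wait_for_event_ack() now remembers when the last firmware event was generated (last_event_jiffies, set by tg3_generate_fw_event()) and only polls for whatever part of the 2.5 ms window is still outstanding. The jiffies arithmetic is the core of it; here is the same computation in standalone form using a millisecond clock instead of jiffies (the names and the clock source are stand-ins, and the +1 jiffy rounding is omitted):

#include <stdio.h>
#include <time.h>

#define FW_EVENT_TIMEOUT_MS 3	/* stands in for the 2500 us window */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Returns how long (in ms) we still need to poll for the previous firmware
 * event, mirroring the "time_remain" logic: zero if enough time has passed,
 * clamped to the full window otherwise. */
static long remaining_wait(long last_event_ms)
{
	long time_remain = last_event_ms + FW_EVENT_TIMEOUT_MS - now_ms();

	if (time_remain < 0)
		return 0;				/* no wait necessary */
	if (time_remain > FW_EVENT_TIMEOUT_MS)
		time_remain = FW_EVENT_TIMEOUT_MS;	/* clamp, like delay_cnt */
	return time_remain;
}

int main(void)
{
	long last = now_ms();

	printf("right after an event: wait up to %ld ms\n", remaining_wait(last));
	printf("for an old event:     wait up to %ld ms\n", remaining_wait(last - 100));
	return 0;
}
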
+6
drivers/net/tg3.h
··· 325 325 #define MAC_MODE_TDE_ENABLE 0x00200000 326 326 #define MAC_MODE_RDE_ENABLE 0x00400000 327 327 #define MAC_MODE_FHDE_ENABLE 0x00800000 328 + #define MAC_MODE_APE_RX_EN 0x08000000 329 + #define MAC_MODE_APE_TX_EN 0x10000000 328 330 #define MAC_STATUS 0x00000404 329 331 #define MAC_STATUS_PCS_SYNCED 0x00000001 330 332 #define MAC_STATUS_SIGNAL_DET 0x00000002 ··· 1891 1889 #define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 1892 1890 1893 1891 /* APE convenience enumerations. */ 1892 + #define TG3_APE_LOCK_GRC 1 1894 1893 #define TG3_APE_LOCK_MEM 4 1895 1894 1896 1895 #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 ··· 2432 2429 struct tg3_ethtool_stats estats; 2433 2430 struct tg3_ethtool_stats estats_prev; 2434 2431 2432 + union { 2435 2433 unsigned long phy_crc_errors; 2434 + unsigned long last_event_jiffies; 2435 + }; 2436 2436 2437 2437 u32 rx_offset; 2438 2438 u32 tg3_flags;
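The tg3.h hunk above also wraps phy_crc_errors and last_event_jiffies in an anonymous union, so the new timestamp reuses the counter's storage instead of growing the device structure — presumably because the two fields are never needed on the same chip at the same time. A compilable sketch of the layout effect; struct tg3_sketch is an invented stand-in, not the real struct:

#include <stdio.h>

/* Cut-down sketch: the two fields overlap in memory, so adding
 * last_event_jiffies does not enlarge the structure. */
struct tg3_sketch {
	unsigned long rx_dropped;
	union {
		unsigned long phy_crc_errors;
		unsigned long last_event_jiffies;
	};
	unsigned long tx_dropped;
};

int main(void)
{
	struct tg3_sketch t = { 0 };

	t.last_event_jiffies = 12345;
	/* Same storage, same type: the other member sees the same value. */
	printf("sizeof = %zu, phy_crc_errors = %lu\n",
	       sizeof(t), t.phy_crc_errors);
	return 0;
}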
+4 -4
drivers/net/tlan.c
··· 360 360 { 361 361 unsigned long addr; 362 362 363 - addr = tag->buffer[8].address; 364 - addr |= (tag->buffer[9].address << 16) << 16; 363 + addr = tag->buffer[9].address; 364 + addr |= (tag->buffer[8].address << 16) << 16; 365 365 return (struct sk_buff *) addr; 366 366 } 367 367 ··· 1984 1984 TLanList *list; 1985 1985 dma_addr_t list_phys; 1986 1986 struct sk_buff *skb; 1987 - void *t = NULL; 1988 1987 1989 1988 priv->txHead = 0; 1990 1989 priv->txTail = 0; ··· 2021 2022 } 2022 2023 2023 2024 skb_reserve( skb, NET_IP_ALIGN ); 2024 - list->buffer[0].address = pci_map_single(priv->pciDev, t, 2025 + list->buffer[0].address = pci_map_single(priv->pciDev, 2026 + skb->data, 2025 2027 TLAN_MAX_FRAME_SIZE, 2026 2028 PCI_DMA_FROMDEVICE); 2027 2029 TLan_StoreSKB(list, skb);
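The tlan.c fix above swaps which of the two 32-bit descriptor slots holds the low and high halves of the stashed skb pointer, and keeps the (x << 16) << 16 idiom so the code never shifts a value by a full 32 bits where that would be undefined. A rough user-space sketch of the store/fetch round trip; the field names and the cast to unsigned long are mine, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two 32-bit descriptor slots used to stash a pointer
 * (low half in one slot, high half in the other). */
struct fake_desc {
	uint32_t lo;
	uint32_t hi;
};

static void store_ptr(struct fake_desc *d, void *p)
{
	unsigned long addr = (unsigned long)p;

	d->lo = (uint32_t)addr;
	/* Double 16-bit shift: on a 32-bit build a single ">> 32" on
	 * unsigned long would be undefined; this form simply yields 0. */
	d->hi = (uint32_t)((addr >> 16) >> 16);
}

static void *get_ptr(const struct fake_desc *d)
{
	unsigned long addr = d->lo;

	addr |= ((unsigned long)d->hi << 16) << 16;
	return (void *)addr;
}

int main(void)
{
	int payload = 42;
	struct fake_desc d;

	store_ptr(&d, &payload);
	printf("round trip ok: %d\n", *(int *)get_ptr(&d));
	return 0;
}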
-1
drivers/net/tokenring/lanstreamer.c
··· 119 119 #include <linux/pci.h> 120 120 #include <linux/dma-mapping.h> 121 121 #include <linux/spinlock.h> 122 - #include <linux/version.h> 123 122 #include <linux/bitops.h> 124 123 #include <linux/jiffies.h> 125 124
-2
drivers/net/tokenring/lanstreamer.h
··· 60 60 * 61 61 */ 62 62 63 - #include <linux/version.h> 64 - 65 63 /* MAX_INTR - the maximum number of times we can loop 66 64 * inside the interrupt function before returning 67 65 * control to the OS (maximum value is 256)
+101 -4
drivers/net/tun.c
··· 358 358 return mask; 359 359 } 360 360 361 + /* prepad is the amount to reserve at front. len is length after that. 362 + * linear is a hint as to how much to copy (usually headers). */ 363 + static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear, 364 + gfp_t gfp) 365 + { 366 + struct sk_buff *skb; 367 + unsigned int i; 368 + 369 + skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN); 370 + if (skb) { 371 + skb_reserve(skb, prepad); 372 + skb_put(skb, len); 373 + return skb; 374 + } 375 + 376 + /* Under a page? Don't bother with paged skb. */ 377 + if (prepad + len < PAGE_SIZE) 378 + return NULL; 379 + 380 + /* Start with a normal skb, and add pages. */ 381 + skb = alloc_skb(prepad + linear, gfp); 382 + if (!skb) 383 + return NULL; 384 + 385 + skb_reserve(skb, prepad); 386 + skb_put(skb, linear); 387 + 388 + len -= linear; 389 + 390 + for (i = 0; i < MAX_SKB_FRAGS; i++) { 391 + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 392 + 393 + f->page = alloc_page(gfp|__GFP_ZERO); 394 + if (!f->page) 395 + break; 396 + 397 + f->page_offset = 0; 398 + f->size = PAGE_SIZE; 399 + 400 + skb->data_len += PAGE_SIZE; 401 + skb->len += PAGE_SIZE; 402 + skb->truesize += PAGE_SIZE; 403 + skb_shinfo(skb)->nr_frags++; 404 + 405 + if (len < PAGE_SIZE) { 406 + len = 0; 407 + break; 408 + } 409 + len -= PAGE_SIZE; 410 + } 411 + 412 + /* Too large, or alloc fail? */ 413 + if (unlikely(len)) { 414 + kfree_skb(skb); 415 + skb = NULL; 416 + } 417 + 418 + return skb; 419 + } 420 + 361 421 /* Get packet from user space buffer */ 362 422 static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) 363 423 { ··· 451 391 return -EINVAL; 452 392 } 453 393 454 - if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { 394 + if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) { 455 395 tun->dev->stats.rx_dropped++; 456 396 return -ENOMEM; 457 397 } 458 398 459 - if (align) 460 - skb_reserve(skb, align); 461 - if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { 399 + if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) { 462 400 tun->dev->stats.rx_dropped++; 463 401 kfree_skb(skb); 464 402 return -EFAULT; ··· 806 748 return err; 807 749 } 808 750 751 + static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) 752 + { 753 + struct tun_struct *tun = file->private_data; 754 + 755 + if (!tun) 756 + return -EBADFD; 757 + 758 + DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); 759 + 760 + strcpy(ifr->ifr_name, tun->dev->name); 761 + 762 + ifr->ifr_flags = 0; 763 + 764 + if (ifr->ifr_flags & TUN_TUN_DEV) 765 + ifr->ifr_flags |= IFF_TUN; 766 + else 767 + ifr->ifr_flags |= IFF_TAP; 768 + 769 + if (tun->flags & TUN_NO_PI) 770 + ifr->ifr_flags |= IFF_NO_PI; 771 + 772 + if (tun->flags & TUN_ONE_QUEUE) 773 + ifr->ifr_flags |= IFF_ONE_QUEUE; 774 + 775 + if (tun->flags & TUN_VNET_HDR) 776 + ifr->ifr_flags |= IFF_VNET_HDR; 777 + 778 + return 0; 779 + } 780 + 809 781 /* This is like a cut-down ethtool ops, except done via tun fd so no 810 782 * privs required. */ 811 783 static int set_offload(struct net_device *dev, unsigned long arg) ··· 921 833 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 922 834 923 835 switch (cmd) { 836 + case TUNGETIFF: 837 + ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); 838 + if (ret) 839 + return ret; 840 + 841 + if (copy_to_user(argp, &ifr, sizeof(ifr))) 842 + return -EFAULT; 843 + break; 844 + 924 845 case TUNSETNOCSUM: 925 846 /* Disable/Enable checksum */ 926 847 if (arg)
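Besides the paged tun_alloc_skb() receive path, the tun.c hunks above add a TUNGETIFF ioctl that copies the attached interface's name and flags back to user space. A hedged user-space sketch of how it could be exercised (needs CAP_NET_ADMIN; the device name used here is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}

	/* Create a tap interface without the packet-information header. */
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tap-demo", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}

	/* Added by the hunk above: read back the name and flags in use. */
	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(fd, TUNGETIFF, &ifr) < 0) {
		perror("TUNGETIFF");
		return 1;
	}
	printf("attached to %s, flags 0x%x\n",
	       ifr.ifr_name, (unsigned)ifr.ifr_flags);

	close(fd);
	return 0;
}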
-1
drivers/net/typhoon.c
··· 128 128 #include <asm/io.h> 129 129 #include <asm/uaccess.h> 130 130 #include <linux/in6.h> 131 - #include <linux/version.h> 132 131 #include <linux/dma-mapping.h> 133 132 134 133 #include "typhoon.h"
+10 -11
drivers/net/usb/Kconfig
··· 154 154 This driver creates an interface named "ethX", where X depends on 155 155 what other networking devices you have in use. 156 156 157 - config USB_HSO 158 - tristate "Option USB High Speed Mobile Devices" 159 - depends on USB && RFKILL 160 - default n 161 - help 162 - Choose this option if you have an Option HSDPA/HSUPA card. 163 - These cards support downlink speeds of 7.2Mbps or greater. 164 - 165 - To compile this driver as a module, choose M here: the 166 - module will be called hso. 167 - 168 157 config USB_NET_CDCETHER 169 158 tristate "CDC Ethernet support (smart devices such as cable modems)" 170 159 depends on USB_USBNET ··· 326 337 really need this non-conformant variant of CDC Ethernet (or in 327 338 some cases CDC MDLM) protocol, not "g_ether". 328 339 340 + config USB_HSO 341 + tristate "Option USB High Speed Mobile Devices" 342 + depends on USB && RFKILL 343 + default n 344 + help 345 + Choose this option if you have an Option HSDPA/HSUPA card. 346 + These cards support downlink speeds of 7.2Mbps or greater. 347 + 348 + To compile this driver as a module, choose M here: the 349 + module will be called hso. 329 350 330 351 endmenu
+30 -23
drivers/net/usb/hso.c
··· 102 102 103 103 #define MAX_RX_URBS 2 104 104 105 - #define get_serial_by_tty(x) \ 106 - (x ? (struct hso_serial *)x->driver_data : NULL) 105 + static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty) 106 + { 107 + if (tty) 108 + return tty->driver_data; 109 + return NULL; 110 + } 107 111 108 112 /*****************************************************************************/ 109 113 /* Debugging functions */ ··· 298 294 299 295 /* #define DEBUG */ 300 296 301 - #define dev2net(x) (x->port_data.dev_net) 302 - #define dev2ser(x) (x->port_data.dev_serial) 297 + static inline struct hso_net *dev2net(struct hso_device *hso_dev) 298 + { 299 + return hso_dev->port_data.dev_net; 300 + } 301 + 302 + static inline struct hso_serial *dev2ser(struct hso_device *hso_dev) 303 + { 304 + return hso_dev->port_data.dev_serial; 305 + } 303 306 304 307 /* Debugging functions */ 305 308 #ifdef DEBUG 306 309 static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, 307 310 unsigned int len) 308 311 { 309 - u8 i = 0; 312 + static char name[255]; 310 313 311 - printk(KERN_DEBUG "[%d:%s]: len %d", line_count, func_name, len); 312 - 313 - for (i = 0; i < len; i++) { 314 - if (!(i % 16)) 315 - printk("\n 0x%03x: ", i); 316 - printk("%02x ", (unsigned char)buf[i]); 317 - } 318 - printk("\n"); 314 + sprintf(name, "hso[%d:%s]", line_count, func_name); 315 + print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len); 319 316 } 320 317 321 318 #define DUMP(buf_, len_) \ ··· 533 528 534 529 static struct hso_serial *get_serial_by_index(unsigned index) 535 530 { 536 - struct hso_serial *serial; 531 + struct hso_serial *serial = NULL; 537 532 unsigned long flags; 538 533 539 - if (!serial_table[index]) 540 - return NULL; 541 534 spin_lock_irqsave(&serial_table_lock, flags); 542 - serial = dev2ser(serial_table[index]); 535 + if (serial_table[index]) 536 + serial = dev2ser(serial_table[index]); 543 537 spin_unlock_irqrestore(&serial_table_lock, flags); 544 538 545 539 return serial; ··· 565 561 static void set_serial_by_index(unsigned index, struct hso_serial *serial) 566 562 { 567 563 unsigned long flags; 564 + 568 565 spin_lock_irqsave(&serial_table_lock, flags); 569 566 if (serial) 570 567 serial_table[index] = serial->parent; ··· 574 569 spin_unlock_irqrestore(&serial_table_lock, flags); 575 570 } 576 571 577 - /* log a meaningfull explanation of an USB status */ 572 + /* log a meaningful explanation of an USB status */ 578 573 static void log_usb_status(int status, const char *function) 579 574 { 580 575 char *explanation; ··· 1108 1103 /* reset the rts and dtr */ 1109 1104 /* do the actual close */ 1110 1105 serial->open_count--; 1106 + kref_put(&serial->parent->ref, hso_serial_ref_free); 1111 1107 if (serial->open_count <= 0) { 1112 - kref_put(&serial->parent->ref, hso_serial_ref_free); 1113 1108 serial->open_count = 0; 1114 1109 if (serial->tty) { 1115 1110 serial->tty->driver_data = NULL; ··· 1472 1467 return; 1473 1468 } 1474 1469 hso_put_activity(serial->parent); 1475 - tty_wakeup(serial->tty); 1470 + if (serial->tty) 1471 + tty_wakeup(serial->tty); 1476 1472 hso_kick_transmit(serial); 1477 1473 1478 1474 D1(" "); ··· 1544 1538 clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags); 1545 1539 } else { 1546 1540 hso_put_activity(serial->parent); 1547 - tty_wakeup(serial->tty); 1541 + if (serial->tty) 1542 + tty_wakeup(serial->tty); 1548 1543 /* response to a write command */ 1549 1544 hso_kick_transmit(serial); 1550 1545 } ··· 2659 2652 hso_stop_net_device(network_table[i]); 2660 
2653 cancel_work_sync(&network_table[i]->async_put_intf); 2661 2654 cancel_work_sync(&network_table[i]->async_get_intf); 2662 - if(rfk) 2655 + if (rfk) 2663 2656 rfkill_unregister(rfk); 2664 2657 hso_free_net_device(network_table[i]); 2665 2658 } ··· 2730 2723 } 2731 2724 2732 2725 /* operations setup of the serial interface */ 2733 - static struct tty_operations hso_serial_ops = { 2726 + static const struct tty_operations hso_serial_ops = { 2734 2727 .open = hso_serial_open, 2735 2728 .close = hso_serial_close, 2736 2729 .write = hso_serial_write,
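The hso.c hunks above turn the get_serial_by_tty/dev2net/dev2ser macros into static inline helpers, which gives the compiler a real parameter type to check and evaluates the argument exactly once. A small stand-alone sketch of that difference; the struct layouts and the next_tty() helper are invented for illustration:

#include <stdio.h>

struct hso_serial { int index; };
struct tty_struct { void *driver_data; };

/* Old style: no type checking, and the argument is evaluated twice. */
#define GET_SERIAL_BY_TTY(x) \
	((x) ? (struct hso_serial *)(x)->driver_data : NULL)

/* New style: typed parameter, evaluated exactly once. */
static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty)
{
	if (tty)
		return tty->driver_data;
	return NULL;
}

/* Invented helper with a visible side effect, to show why single
 * evaluation matters. */
static struct tty_struct *next_tty(void)
{
	static struct hso_serial serial = { .index = 3 };
	static struct tty_struct tty = { .driver_data = &serial };
	static int calls;

	calls++;
	printf("next_tty() call #%d\n", calls);
	return &tty;
}

int main(void)
{
	/* The macro form would have called next_tty() twice here. */
	struct hso_serial *s = get_serial_by_tty(next_tty());

	printf("serial index %d\n", s ? s->index : -1);
	return 0;
}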
+2 -7
drivers/net/wireless/ath5k/base.c
··· 40 40 * 41 41 */ 42 42 43 - #include <linux/version.h> 44 43 #include <linux/module.h> 45 44 #include <linux/delay.h> 46 45 #include <linux/hardirq.h> ··· 586 587 ath5k_stop_hw(sc); 587 588 588 589 free_irq(pdev->irq, sc); 589 - pci_disable_msi(pdev); 590 590 pci_save_state(pdev); 591 591 pci_disable_device(pdev); 592 592 pci_set_power_state(pdev, PCI_D3hot); ··· 614 616 */ 615 617 pci_write_config_byte(pdev, 0x41, 0); 616 618 617 - pci_enable_msi(pdev); 618 - 619 619 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 620 620 if (err) { 621 621 ATH5K_ERR(sc, "request_irq failed\n"); 622 - goto err_msi; 622 + goto err_no_irq; 623 623 } 624 624 625 625 err = ath5k_init(sc); ··· 638 642 return 0; 639 643 err_irq: 640 644 free_irq(pdev->irq, sc); 641 - err_msi: 642 - pci_disable_msi(pdev); 645 + err_no_irq: 643 646 pci_disable_device(pdev); 644 647 return err; 645 648 }
+5 -1
drivers/net/wireless/ath9k/hw.c
··· 5017 5017 5018 5018 for (i = 0; i < 123; i++) { 5019 5019 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { 5020 - if ((abs(cur_vit_mask - bin)) < 75) 5020 + 5021 + /* workaround for gcc bug #37014 */ 5022 + volatile int tmp = abs(cur_vit_mask - bin); 5023 + 5024 + if (tmp < 75) 5021 5025 mask_amt = 1; 5022 5026 else 5023 5027 mask_amt = 0;
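The ath9k change above works around a gcc miscompilation of abs() (bug #37014, per the in-tree comment) by forcing the intermediate value through a volatile temporary before comparing it. A stand-alone sketch of the same pattern with made-up inputs:

#include <stdio.h>
#include <stdlib.h>

/* Pattern from the patch: route the intermediate through a volatile
 * temporary so the compiler cannot mis-fold abs() into the comparison. */
static int mask_amount(int cur_vit_mask, int bin)
{
	volatile int tmp = abs(cur_vit_mask - bin);

	return (tmp < 75) ? 1 : 0;
}

int main(void)
{
	/* Prints "1 0". */
	printf("%d %d\n", mask_amount(100, 60), mask_amount(100, 300));
	return 0;
}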
+2 -1
drivers/net/wireless/b43/main.c
··· 33 33 #include <linux/moduleparam.h> 34 34 #include <linux/if_arp.h> 35 35 #include <linux/etherdevice.h> 36 - #include <linux/version.h> 37 36 #include <linux/firmware.h> 38 37 #include <linux/wireless.h> 39 38 #include <linux/workqueue.h> ··· 4614 4615 if (bus->bustype == SSB_BUSTYPE_PCI) { 4615 4616 pdev = bus->host_pci; 4616 4617 if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || 4618 + IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || 4617 4619 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || 4620 + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || 4618 4621 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013)) 4619 4622 bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; 4620 4623 }
-1
drivers/net/wireless/b43legacy/main.c
··· 34 34 #include <linux/moduleparam.h> 35 35 #include <linux/if_arp.h> 36 36 #include <linux/etherdevice.h> 37 - #include <linux/version.h> 38 37 #include <linux/firmware.h> 39 38 #include <linux/wireless.h> 40 39 #include <linux/workqueue.h>
-1
drivers/net/wireless/ipw2100.c
··· 157 157 #include <linux/stringify.h> 158 158 #include <linux/tcp.h> 159 159 #include <linux/types.h> 160 - #include <linux/version.h> 161 160 #include <linux/time.h> 162 161 #include <linux/firmware.h> 163 162 #include <linux/acpi.h>
-1
drivers/net/wireless/ipw2200.c
··· 31 31 ******************************************************************************/ 32 32 33 33 #include "ipw2200.h" 34 - #include <linux/version.h> 35 34 36 35 37 36 #ifndef KBUILD_EXTMOD
-1
drivers/net/wireless/iwlwifi/iwl-3945-led.c
··· 27 27 28 28 #include <linux/kernel.h> 29 29 #include <linux/module.h> 30 - #include <linux/version.h> 31 30 #include <linux/init.h> 32 31 #include <linux/pci.h> 33 32 #include <linux/dma-mapping.h>
-1
drivers/net/wireless/iwlwifi/iwl-3945.c
··· 26 26 27 27 #include <linux/kernel.h> 28 28 #include <linux/module.h> 29 - #include <linux/version.h> 30 29 #include <linux/init.h> 31 30 #include <linux/pci.h> 32 31 #include <linux/dma-mapping.h>
+1 -2
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 26 26 27 27 #include <linux/kernel.h> 28 28 #include <linux/module.h> 29 - #include <linux/version.h> 30 29 #include <linux/init.h> 31 30 #include <linux/pci.h> 32 31 #include <linux/dma-mapping.h> ··· 966 967 967 968 s = iwl4965_get_sub_band(priv, channel); 968 969 if (s >= EEPROM_TX_POWER_BANDS) { 969 - IWL_ERROR("Tx Power can not find channel %d ", channel); 970 + IWL_ERROR("Tx Power can not find channel %d\n", channel); 970 971 return -1; 971 972 } 972 973
-1
drivers/net/wireless/iwlwifi/iwl-5000.c
··· 25 25 26 26 #include <linux/kernel.h> 27 27 #include <linux/module.h> 28 - #include <linux/version.h> 29 28 #include <linux/init.h> 30 29 #include <linux/pci.h> 31 30 #include <linux/dma-mapping.h>
-1
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 29 29 30 30 #include <linux/kernel.h> 31 31 #include <linux/module.h> 32 - #include <linux/version.h> 33 32 #include <linux/init.h> 34 33 #include <linux/pci.h> 35 34 #include <linux/dma-mapping.h>
-1
drivers/net/wireless/iwlwifi/iwl-core.c
··· 28 28 29 29 #include <linux/kernel.h> 30 30 #include <linux/module.h> 31 - #include <linux/version.h> 32 31 #include <net/mac80211.h> 33 32 34 33 struct iwl_priv; /* FIXME: remove */
+3 -4
drivers/net/wireless/iwlwifi/iwl-eeprom.c
··· 63 63 64 64 #include <linux/kernel.h> 65 65 #include <linux/module.h> 66 - #include <linux/version.h> 67 66 #include <linux/init.h> 68 67 69 68 #include <net/mac80211.h> ··· 145 146 { 146 147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 147 148 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 148 - IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 149 + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); 149 150 return -ENOENT; 150 151 } 151 152 return 0; ··· 226 227 227 228 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 228 229 if (ret < 0) { 229 - IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 230 + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); 230 231 ret = -ENOENT; 231 232 goto err; 232 233 } ··· 253 254 } 254 255 255 256 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { 256 - IWL_ERROR("Time out reading EEPROM[%d]", addr); 257 + IWL_ERROR("Time out reading EEPROM[%d]\n", addr); 257 258 ret = -ETIMEDOUT; 258 259 goto done; 259 260 }
-1
drivers/net/wireless/iwlwifi/iwl-hcmd.c
··· 28 28 29 29 #include <linux/kernel.h> 30 30 #include <linux/module.h> 31 - #include <linux/version.h> 32 31 #include <net/mac80211.h> 33 32 34 33 #include "iwl-dev.h" /* FIXME: remove */
-1
drivers/net/wireless/iwlwifi/iwl-led.c
··· 27 27 28 28 #include <linux/kernel.h> 29 29 #include <linux/module.h> 30 - #include <linux/version.h> 31 30 #include <linux/init.h> 32 31 #include <linux/pci.h> 33 32 #include <linux/dma-mapping.h>
-1
drivers/net/wireless/iwlwifi/iwl-power.c
··· 29 29 30 30 #include <linux/kernel.h> 31 31 #include <linux/module.h> 32 - #include <linux/version.h> 33 32 #include <linux/init.h> 34 33 35 34 #include <net/mac80211.h>
-1
drivers/net/wireless/iwlwifi/iwl-rfkill.c
··· 27 27 *****************************************************************************/ 28 28 #include <linux/kernel.h> 29 29 #include <linux/module.h> 30 - #include <linux/version.h> 31 30 #include <linux/init.h> 32 31 33 32 #include <net/mac80211.h>
+2 -2
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 207 207 case WLAN_HT_CAP_MIMO_PS_DISABLED: 208 208 break; 209 209 default: 210 - IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); 210 + IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode); 211 211 break; 212 212 } 213 213 ··· 969 969 return priv->hw_params.bcast_sta_id; 970 970 971 971 default: 972 - IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 972 + IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); 973 973 return priv->hw_params.bcast_sta_id; 974 974 } 975 975 }
+2 -2
drivers/net/wireless/iwlwifi/iwl-tx.c
··· 493 493 /* Alloc keep-warm buffer */ 494 494 ret = iwl_kw_alloc(priv); 495 495 if (ret) { 496 - IWL_ERROR("Keep Warm allocation failed"); 496 + IWL_ERROR("Keep Warm allocation failed\n"); 497 497 goto error_kw; 498 498 } 499 499 spin_lock_irqsave(&priv->lock, flags); ··· 1463 1463 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 1464 1464 1465 1465 if (scd_flow >= priv->hw_params.max_txq_num) { 1466 - IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); 1466 + IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n"); 1467 1467 return; 1468 1468 } 1469 1469
+3 -4
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 29 29 30 30 #include <linux/kernel.h> 31 31 #include <linux/module.h> 32 - #include <linux/version.h> 33 32 #include <linux/init.h> 34 33 #include <linux/pci.h> 35 34 #include <linux/dma-mapping.h> ··· 1557 1558 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); 1558 1559 1559 1560 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 1560 - IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 1561 + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); 1561 1562 return -ENOENT; 1562 1563 } 1563 1564 ··· 1582 1583 } 1583 1584 1584 1585 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { 1585 - IWL_ERROR("Time out reading EEPROM[%d]", addr); 1586 + IWL_ERROR("Time out reading EEPROM[%d]\n", addr); 1586 1587 return -ETIMEDOUT; 1587 1588 } 1588 1589 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); ··· 2506 2507 return priv->hw_setting.bcast_sta_id; 2507 2508 2508 2509 default: 2509 - IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 2510 + IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); 2510 2511 return priv->hw_setting.bcast_sta_id; 2511 2512 } 2512 2513 }
+27 -24
drivers/net/wireless/p54/p54common.c
··· 413 413 last_addr = range->end_addr; 414 414 __skb_unlink(entry, &priv->tx_queue); 415 415 memset(&info->status, 0, sizeof(info->status)); 416 - priv->tx_stats[skb_get_queue_mapping(skb)].len--; 417 416 entry_hdr = (struct p54_control_hdr *) entry->data; 418 417 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 419 418 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 420 419 pad = entry_data->align[0]; 421 420 421 + priv->tx_stats[entry_data->hw_queue - 4].len--; 422 422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 423 423 if (!(payload->status & 0x01)) 424 424 info->flags |= IEEE80211_TX_STAT_ACK; ··· 557 557 struct p54_tx_control_allocdata *txhdr; 558 558 size_t padding, len; 559 559 u8 rate; 560 + u8 cts_rate = 0x20; 560 561 561 562 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; 562 563 if (unlikely(current_queue->len > current_queue->limit)) ··· 582 581 hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1); 583 582 hdr->retry1 = hdr->retry2 = info->control.retry_limit; 584 583 585 - memset(txhdr->wep_key, 0x0, 16); 586 - txhdr->padding = 0; 587 - txhdr->padding2 = 0; 588 - 589 584 /* TODO: add support for alternate retry TX rates */ 590 585 rate = ieee80211_get_tx_rate(dev, info)->hw_value; 591 - if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) 586 + if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) { 592 587 rate |= 0x10; 593 - if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) 588 + cts_rate |= 0x10; 589 + } 590 + if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 594 591 rate |= 0x40; 595 - else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 592 + cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; 593 + } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 596 594 rate |= 0x20; 595 + cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; 596 + } 597 597 memset(txhdr->rateset, rate, 8); 598 - txhdr->wep_key_present = 0; 599 - txhdr->wep_key_len = 0; 600 - txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4); 601 - txhdr->magic4 = 0; 602 - txhdr->antenna = (info->antenna_sel_tx == 0) ? 598 + txhdr->key_type = 0; 599 + txhdr->key_len = 0; 600 + txhdr->hw_queue = skb_get_queue_mapping(skb) + 4; 601 + txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? 603 602 2 : info->antenna_sel_tx - 1; 604 603 txhdr->output_power = 0x7f; // HW Maximum 605 - txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 606 - 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); 604 + txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 
605 + 0 : cts_rate; 607 606 if (padding) 608 607 txhdr->align[0] = padding; 609 608 ··· 837 836 struct p54_common *priv = dev->priv; 838 837 int err; 839 838 839 + if (!priv->cached_vdcf) { 840 + priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+ 841 + priv->tx_hdr_len + sizeof(struct p54_control_hdr), 842 + GFP_KERNEL); 843 + 844 + if (!priv->cached_vdcf) 845 + return -ENOMEM; 846 + } 847 + 840 848 err = priv->open(dev); 841 849 if (!err) 842 850 priv->mode = IEEE80211_IF_TYPE_MNTR; 851 + 852 + p54_init_vdcf(dev); 843 853 844 854 return err; 845 855 } ··· 1031 1019 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 1032 1020 sizeof(struct p54_tx_control_allocdata); 1033 1021 1034 - priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) + 1035 - priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL); 1036 - 1037 - if (!priv->cached_vdcf) { 1038 - ieee80211_free_hw(dev); 1039 - return NULL; 1040 - } 1041 - 1042 - p54_init_vdcf(dev); 1043 1022 mutex_init(&priv->conf_mutex); 1044 1023 1045 1024 return dev;
+9 -9
drivers/net/wireless/p54/p54common.h
··· 183 183 184 184 struct p54_tx_control_allocdata { 185 185 u8 rateset[8]; 186 - u16 padding; 187 - u8 wep_key_present; 188 - u8 wep_key_len; 189 - u8 wep_key[16]; 190 - __le32 frame_type; 191 - u32 padding2; 192 - __le16 magic4; 193 - u8 antenna; 186 + u8 unalloc0[2]; 187 + u8 key_type; 188 + u8 key_len; 189 + u8 key[16]; 190 + u8 hw_queue; 191 + u8 unalloc1[9]; 192 + u8 tx_antenna; 194 193 u8 output_power; 195 - __le32 magic5; 194 + u8 cts_rate; 195 + u8 unalloc2[3]; 196 196 u8 align[0]; 197 197 } __attribute__ ((packed)); 198 198
+10
drivers/net/wireless/p54/p54usb.c
··· 109 109 urb->context = skb; 110 110 skb_queue_tail(&priv->rx_queue, skb); 111 111 } else { 112 + if (!priv->hw_type) 113 + skb_push(skb, sizeof(struct net2280_tx_hdr)); 114 + 115 + skb_reset_tail_pointer(skb); 112 116 skb_trim(skb, 0); 117 + if (urb->transfer_buffer != skb_tail_pointer(skb)) { 118 + /* this should not happen */ 119 + WARN_ON(1); 120 + urb->transfer_buffer = skb_tail_pointer(skb); 121 + } 122 + 113 123 skb_queue_tail(&priv->rx_queue, skb); 114 124 } 115 125
+4 -4
drivers/net/wireless/rt2x00/rt2x00queue.h
··· 173 173 * frame transmission failed due to excessive retries. 174 174 */ 175 175 enum txdone_entry_desc_flags { 176 - TXDONE_UNKNOWN = 1 << 0, 177 - TXDONE_SUCCESS = 1 << 1, 178 - TXDONE_FAILURE = 1 << 2, 179 - TXDONE_EXCESSIVE_RETRY = 1 << 3, 176 + TXDONE_UNKNOWN, 177 + TXDONE_SUCCESS, 178 + TXDONE_FAILURE, 179 + TXDONE_EXCESSIVE_RETRY, 180 180 }; 181 181 182 182 /**
+1
drivers/net/wireless/rt2x00/rt2x00usb.c
··· 181 181 * (Only indirectly by looking at the failed TX counters 182 182 * in the register). 183 183 */ 184 + txdesc.flags = 0; 184 185 if (!urb->status) 185 186 __set_bit(TXDONE_UNKNOWN, &txdesc.flags); 186 187 else
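The rt2x00 pair of hunks above converts the txdone flags from mask-style values (1 << n) to plain enum indices and zeroes txdesc.flags before use; the flags are manipulated with __set_bit(), which expects a bit number rather than a mask. A user-space sketch of why the distinction matters; set_bit_nr()/test_bit_nr() are simplified stand-ins for the kernel helpers:

#include <stdio.h>

/* Minimal stand-ins for __set_bit()/test_bit(): they take a bit number. */
static inline void set_bit_nr(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static inline int test_bit_nr(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

/* After the fix: plain indices, suitable for the helpers above. */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
};

int main(void)
{
	unsigned long flags = 0;

	set_bit_nr(TXDONE_EXCESSIVE_RETRY, &flags);

	/* With the old mask-style value (1 << 3 == 8) this would have set
	 * bit 8 instead of bit 3, and the test below would not match. */
	printf("excessive retry flagged: %d\n",
	       test_bit_nr(TXDONE_EXCESSIVE_RETRY, &flags));
	return 0;
}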
+1
drivers/net/wireless/rtl8187_dev.c
··· 40 40 /* Netgear */ 41 41 {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, 42 42 {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, 43 + {USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B}, 43 44 /* HP */ 44 45 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, 45 46 /* Sitecom */
+10
drivers/of/device.c
··· 57 57 return sprintf(buf, "%s\n", ofdev->node->full_name); 58 58 } 59 59 60 + static ssize_t name_show(struct device *dev, 61 + struct device_attribute *attr, char *buf) 62 + { 63 + struct of_device *ofdev; 64 + 65 + ofdev = to_of_device(dev); 66 + return sprintf(buf, "%s\n", ofdev->node->name); 67 + } 68 + 60 69 static ssize_t modalias_show(struct device *dev, 61 70 struct device_attribute *attr, char *buf) 62 71 { ··· 80 71 81 72 struct device_attribute of_platform_device_attrs[] = { 82 73 __ATTR_RO(devspec), 74 + __ATTR_RO(name), 83 75 __ATTR_RO(modalias), 84 76 __ATTR_NULL 85 77 };
+26 -12
drivers/pci/hotplug/acpi_pcihp.c
··· 382 382 int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) 383 383 { 384 384 acpi_status status; 385 - acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); 385 + acpi_handle chandle, handle; 386 386 struct pci_dev *pdev = dev; 387 387 struct pci_bus *parent; 388 388 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; ··· 399 399 * Per PCI firmware specification, we should run the ACPI _OSC 400 400 * method to get control of hotplug hardware before using it. If 401 401 * an _OSC is missing, we look for an OSHP to do the same thing. 402 - * To handle different BIOS behavior, we look for _OSC and OSHP 403 - * within the scope of the hotplug controller and its parents, 402 + * To handle different BIOS behavior, we look for _OSC on a root 403 + * bridge preferentially (according to PCI fw spec). Later for 404 + * OSHP within the scope of the hotplug controller and its parents, 404 405 * upto the host bridge under which this controller exists. 405 406 */ 407 + handle = acpi_find_root_bridge_handle(pdev); 408 + if (handle) { 409 + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 410 + dbg("Trying to get hotplug control for %s\n", 411 + (char *)string.pointer); 412 + status = pci_osc_control_set(handle, flags); 413 + if (ACPI_SUCCESS(status)) 414 + goto got_one; 415 + kfree(string.pointer); 416 + string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; 417 + } 418 + 419 + pdev = dev; 420 + handle = DEVICE_ACPI_HANDLE(&dev->dev); 406 421 while (!handle) { 407 422 /* 408 423 * This hotplug controller was not listed in the ACPI name ··· 442 427 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 443 428 dbg("Trying to get hotplug control for %s \n", 444 429 (char *)string.pointer); 445 - status = pci_osc_control_set(handle, flags); 446 - if (status == AE_NOT_FOUND) 447 - status = acpi_run_oshp(handle); 448 - if (ACPI_SUCCESS(status)) { 449 - dbg("Gained control for hotplug HW for pci %s (%s)\n", 450 - pci_name(dev), (char *)string.pointer); 451 - kfree(string.pointer); 452 - return 0; 453 - } 430 + status = acpi_run_oshp(handle); 431 + if (ACPI_SUCCESS(status)) 432 + goto got_one; 454 433 if (acpi_root_bridge(handle)) 455 434 break; 456 435 chandle = handle; ··· 458 449 459 450 kfree(string.pointer); 460 451 return -ENODEV; 452 + got_one: 453 + dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev), 454 + (char *)string.pointer); 455 + kfree(string.pointer); 456 + return 0; 461 457 } 462 458 EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); 463 459
+1 -6
drivers/pci/pcie/aer/aerdrv_acpi.c
··· 36 36 if (acpi_pci_disabled) 37 37 return -1; 38 38 39 - /* Find root host bridge */ 40 - while (pdev->bus->self) 41 - pdev = pdev->bus->self; 42 - handle = acpi_get_pci_rootbridge_handle( 43 - pci_domain_nr(pdev->bus), pdev->bus->number); 44 - 39 + handle = acpi_find_root_bridge_handle(pdev); 45 40 if (handle) { 46 41 pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); 47 42 status = pci_osc_control_set(handle,
+3
drivers/pci/probe.c
··· 383 383 res->start = base; 384 384 if (!res->end) 385 385 res->end = limit + 0xfff; 386 + printk(KERN_INFO "PCI: bridge %s io port: [%llx, %llx]\n", pci_name(dev), res->start, res->end); 386 387 } 387 388 388 389 res = child->resource[1]; ··· 395 394 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; 396 395 res->start = base; 397 396 res->end = limit + 0xfffff; 397 + printk(KERN_INFO "PCI: bridge %s 32bit mmio: [%llx, %llx]\n", pci_name(dev), res->start, res->end); 398 398 } 399 399 400 400 res = child->resource[2]; ··· 431 429 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; 432 430 res->start = base; 433 431 res->end = limit + 0xfffff; 432 + printk(KERN_INFO "PCI: bridge %s %sbit mmio pref: [%llx, %llx]\n", pci_name(dev), (res->flags & PCI_PREF_RANGE_TYPE_64)?"64":"32",res->start, res->end); 434 433 } 435 434 } 436 435
+35
drivers/pci/setup-bus.c
··· 530 530 } 531 531 EXPORT_SYMBOL(pci_bus_assign_resources); 532 532 533 + static void pci_bus_dump_res(struct pci_bus *bus) 534 + { 535 + int i; 536 + 537 + for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 538 + struct resource *res = bus->resource[i]; 539 + if (!res) 540 + continue; 541 + 542 + printk(KERN_INFO "bus: %02x index %x %s: [%llx, %llx]\n", bus->number, i, (res->flags & IORESOURCE_IO)? "io port":"mmio", res->start, res->end); 543 + } 544 + } 545 + 546 + static void pci_bus_dump_resources(struct pci_bus *bus) 547 + { 548 + struct pci_bus *b; 549 + struct pci_dev *dev; 550 + 551 + 552 + pci_bus_dump_res(bus); 553 + 554 + list_for_each_entry(dev, &bus->devices, bus_list) { 555 + b = dev->subordinate; 556 + if (!b) 557 + continue; 558 + 559 + pci_bus_dump_resources(b); 560 + } 561 + } 562 + 533 563 void __init 534 564 pci_assign_unassigned_resources(void) 535 565 { ··· 574 544 list_for_each_entry(bus, &pci_root_buses, node) { 575 545 pci_bus_assign_resources(bus); 576 546 pci_enable_bridges(bus); 547 + } 548 + 549 + /* dump the resource on buses */ 550 + list_for_each_entry(bus, &pci_root_buses, node) { 551 + pci_bus_dump_resources(bus); 577 552 } 578 553 }
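The setup-bus.c addition above prints each bus's resource windows after assignment and recurses through every subordinate bus. A cut-down user-space sketch of the same recursive walk over an invented bus tree; struct toy_bus and its fields are stand-ins, not the PCI core types:

#include <stdio.h>

#define MAX_CHILDREN 4

/* Toy bus with a couple of address windows and optional child buses. */
struct toy_bus {
	int number;
	unsigned long io_start, io_end;
	unsigned long mem_start, mem_end;
	struct toy_bus *children[MAX_CHILDREN];
};

static void dump_res(const struct toy_bus *bus)
{
	printf("bus %02x io port: [%lx, %lx]\n",
	       bus->number, bus->io_start, bus->io_end);
	printf("bus %02x mmio:    [%lx, %lx]\n",
	       bus->number, bus->mem_start, bus->mem_end);
}

/* Same shape as pci_bus_dump_resources(): print this bus, then recurse
 * into each subordinate bus. */
static void dump_resources(const struct toy_bus *bus)
{
	int i;

	dump_res(bus);
	for (i = 0; i < MAX_CHILDREN; i++)
		if (bus->children[i])
			dump_resources(bus->children[i]);
}

int main(void)
{
	struct toy_bus leaf = { .number = 1,
				.io_start = 0x2000, .io_end = 0x2fff,
				.mem_start = 0xd0000000, .mem_end = 0xd00fffff };
	struct toy_bus root = { .number = 0,
				.io_start = 0x1000, .io_end = 0x3fff,
				.mem_start = 0xc0000000, .mem_end = 0xdfffffff,
				.children = { &leaf } };

	dump_resources(&root);
	return 0;
}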
+1 -1
drivers/rtc/Kconfig
··· 561 561 562 562 config RTC_DRV_BFIN 563 563 tristate "Blackfin On-Chip RTC" 564 - depends on BLACKFIN 564 + depends on BLACKFIN && !BF561 565 565 help 566 566 If you say yes here you will get support for the 567 567 Blackfin On-Chip Real Time Clock.
+28 -32
drivers/rtc/rtc-bfin.c
··· 218 218 return IRQ_NONE; 219 219 } 220 220 221 - static int bfin_rtc_open(struct device *dev) 222 - { 223 - int ret; 224 - 225 - dev_dbg_stamp(dev); 226 - 227 - ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev); 228 - if (!ret) 229 - bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); 230 - 231 - return ret; 232 - } 233 - 234 - static void bfin_rtc_release(struct device *dev) 235 - { 236 - dev_dbg_stamp(dev); 237 - bfin_rtc_reset(dev, 0); 238 - free_irq(IRQ_RTC, dev); 239 - } 240 - 241 221 static void bfin_rtc_int_set(u16 rtc_int) 242 222 { 243 223 bfin_write_RTC_ISTAT(rtc_int); ··· 350 370 } 351 371 352 372 static struct rtc_class_ops bfin_rtc_ops = { 353 - .open = bfin_rtc_open, 354 - .release = bfin_rtc_release, 355 373 .ioctl = bfin_rtc_ioctl, 356 374 .read_time = bfin_rtc_read_time, 357 375 .set_time = bfin_rtc_set_time, ··· 361 383 static int __devinit bfin_rtc_probe(struct platform_device *pdev) 362 384 { 363 385 struct bfin_rtc *rtc; 386 + struct device *dev = &pdev->dev; 364 387 int ret = 0; 388 + unsigned long timeout; 365 389 366 - dev_dbg_stamp(&pdev->dev); 390 + dev_dbg_stamp(dev); 367 391 392 + /* Allocate memory for our RTC struct */ 368 393 rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); 369 394 if (unlikely(!rtc)) 370 395 return -ENOMEM; 396 + platform_set_drvdata(pdev, rtc); 397 + device_init_wakeup(dev, 1); 371 398 372 - rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); 373 - if (IS_ERR(rtc)) { 374 - ret = PTR_ERR(rtc->rtc_dev); 399 + /* Grab the IRQ and init the hardware */ 400 + ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); 401 + if (unlikely(ret)) 375 402 goto err; 376 - } 377 - 378 - /* see comment at top of file about stopwatch/PIE */ 403 + /* sometimes the bootloader touched things, but the write complete was not 404 + * enabled, so let's just do a quick timeout here since the IRQ will not fire ... 405 + */ 406 + timeout = jiffies + HZ; 407 + while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) 408 + if (time_after(jiffies, timeout)) 409 + break; 410 + bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); 379 411 bfin_write_RTC_SWCNT(0); 380 412 381 - platform_set_drvdata(pdev, rtc); 382 - 383 - device_init_wakeup(&pdev->dev, 1); 413 + /* Register our RTC with the RTC framework */ 414 + rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE); 415 + if (unlikely(IS_ERR(rtc))) { 416 + ret = PTR_ERR(rtc->rtc_dev); 417 + goto err_irq; 418 + } 384 419 385 420 return 0; 386 421 422 + err_irq: 423 + free_irq(IRQ_RTC, dev); 387 424 err: 388 425 kfree(rtc); 389 426 return ret; ··· 407 414 static int __devexit bfin_rtc_remove(struct platform_device *pdev) 408 415 { 409 416 struct bfin_rtc *rtc = platform_get_drvdata(pdev); 417 + struct device *dev = &pdev->dev; 410 418 419 + bfin_rtc_reset(dev, 0); 420 + free_irq(IRQ_RTC, dev); 411 421 rtc_device_unregister(rtc->rtc_dev); 412 422 platform_set_drvdata(pdev, NULL); 413 423 kfree(rtc);
+4 -1
drivers/rtc/rtc-dev.c
··· 403 403 404 404 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 405 405 case RTC_UIE_OFF: 406 + mutex_unlock(&rtc->ops_lock); 406 407 clear_uie(rtc); 407 - break; 408 + return 0; 408 409 409 410 case RTC_UIE_ON: 411 + mutex_unlock(&rtc->ops_lock); 410 412 err = set_uie(rtc); 413 + return err; 411 414 #endif 412 415 default: 413 416 err = -ENOTTY;
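The rtc-dev.c change above drops ops_lock before calling clear_uie()/set_uie() and returns directly, presumably because the UIE emulation path needs to take the same lock (or wait for work that does), which would deadlock if the ioctl kept holding it. A pthread sketch of that ordering; clear_uie_emul() is a stand-in for the emulation code, not the RTC core API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the UIE emulation path: it takes ops_lock itself. */
static void clear_uie_emul(void)
{
	pthread_mutex_lock(&ops_lock);
	puts("uie emulation stopped");
	pthread_mutex_unlock(&ops_lock);
}

static int rtc_ioctl_uie_off(void)
{
	pthread_mutex_lock(&ops_lock);
	/* ... validate the request under the lock ... */

	/* As in the patch: release the lock *before* calling into the
	 * emulation code, then return instead of falling through to the
	 * shared unlock at the end of the ioctl. */
	pthread_mutex_unlock(&ops_lock);
	clear_uie_emul();
	return 0;
}

int main(void)
{
	/* Holding ops_lock across clear_uie_emul() would self-deadlock
	 * with a non-recursive mutex. */
	return rtc_ioctl_uie_off();
}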
+5 -5
drivers/rtc/rtc-ds1374.c
··· 173 173 int cr, sr; 174 174 int ret = 0; 175 175 176 - if (client->irq < 0) 176 + if (client->irq <= 0) 177 177 return -EINVAL; 178 178 179 179 mutex_lock(&ds1374->mutex); ··· 212 212 int cr; 213 213 int ret = 0; 214 214 215 - if (client->irq < 0) 215 + if (client->irq <= 0) 216 216 return -EINVAL; 217 217 218 218 ret = ds1374_read_time(dev, &now); ··· 381 381 if (ret) 382 382 goto out_free; 383 383 384 - if (client->irq >= 0) { 384 + if (client->irq > 0) { 385 385 ret = request_irq(client->irq, ds1374_irq, 0, 386 386 "ds1374", client); 387 387 if (ret) { ··· 401 401 return 0; 402 402 403 403 out_irq: 404 - if (client->irq >= 0) 404 + if (client->irq > 0) 405 405 free_irq(client->irq, client); 406 406 407 407 out_free: ··· 414 414 { 415 415 struct ds1374 *ds1374 = i2c_get_clientdata(client); 416 416 417 - if (client->irq >= 0) { 417 + if (client->irq > 0) { 418 418 mutex_lock(&ds1374->mutex); 419 419 ds1374->exiting = 1; 420 420 mutex_unlock(&ds1374->mutex);
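The ds1374 hunks above tighten the interrupt checks from irq < 0 to irq <= 0: for an i2c client an irq of 0 conventionally means no interrupt line was assigned, so it has to be treated as unusable as well. A trivial sketch of the guard:

#include <stdio.h>

/* 0 means "no IRQ wired up", so only strictly positive numbers count. */
static int has_usable_irq(int irq)
{
	return irq > 0;
}

int main(void)
{
	/* Prints "0 0 1". */
	printf("%d %d %d\n", has_usable_irq(-1), has_usable_irq(0),
	       has_usable_irq(17));
	return 0;
}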
-2
drivers/rtc/rtc-max6902.c
··· 20 20 */ 21 21 22 22 #include <linux/module.h> 23 - #include <linux/version.h> 24 - 25 23 #include <linux/kernel.h> 26 24 #include <linux/platform_device.h> 27 25 #include <linux/init.h>
-1
drivers/rtc/rtc-r9701.c
··· 14 14 */ 15 15 16 16 #include <linux/module.h> 17 - #include <linux/version.h> 18 17 #include <linux/kernel.h> 19 18 #include <linux/platform_device.h> 20 19 #include <linux/device.h>
+1 -4
drivers/s390/block/dasd.c
··· 2333 2333 { 2334 2334 struct dasd_device *device; 2335 2335 struct dasd_ccw_req *cqr; 2336 - unsigned long flags; 2337 2336 int ret; 2338 2337 2339 - device = dasd_device_from_cdev(cdev); 2338 + device = dasd_device_from_cdev_locked(cdev); 2340 2339 if (IS_ERR(device)) 2341 2340 return 0; 2342 - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 2343 2341 ret = 0; 2344 2342 switch (event) { 2345 2343 case CIO_GONE: ··· 2367 2369 ret = 1; 2368 2370 break; 2369 2371 } 2370 - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 2371 2372 dasd_put_device(device); 2372 2373 return ret; 2373 2374 }
+1 -1
drivers/s390/block/dasd_eckd.h
··· 308 308 unsigned char flags; 309 309 unsigned char reserved[4]; 310 310 unsigned char suborder; 311 - unsigned char varies[9]; 311 + unsigned char varies[5]; 312 312 } __attribute__ ((packed)); 313 313 314 314 /*
+2 -1
drivers/s390/block/dasd_eer.c
··· 16 16 #include <linux/poll.h> 17 17 #include <linux/mutex.h> 18 18 #include <linux/smp_lock.h> 19 + #include <linux/err.h> 19 20 20 21 #include <asm/uaccess.h> 21 22 #include <asm/atomic.h> ··· 458 457 459 458 cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, 460 459 SNSS_DATA_SIZE, device); 461 - if (!cqr) 460 + if (IS_ERR(cqr)) 462 461 return -ENOMEM; 463 462 464 463 cqr->startdev = device;
+4
drivers/s390/block/dcssblk.c
··· 384 384 * get minor, add to list 385 385 */ 386 386 down_write(&dcssblk_devices_sem); 387 + if (dcssblk_get_segment_by_name(local_buf)) { 388 + rc = -EEXIST; 389 + goto release_gd; 390 + } 387 391 rc = dcssblk_assign_free_minor(dev_info); 388 392 if (rc) { 389 393 up_write(&dcssblk_devices_sem);
+1 -1
drivers/s390/char/tape_char.c
··· 109 109 110 110 /* The current idal buffer is not correct. Allocate a new one. */ 111 111 new = idal_buffer_alloc(block_size, 0); 112 - if (new == NULL) 112 + if (IS_ERR(new)) 113 113 return -ENOMEM; 114 114 115 115 if (device->char_data.idal_buf != NULL)
+1 -1
drivers/s390/char/tape_std.c
··· 248 248 249 249 /* Allocate a new idal buffer. */ 250 250 new = idal_buffer_alloc(count, 0); 251 - if (new == NULL) 251 + if (IS_ERR(new)) 252 252 return -ENOMEM; 253 253 if (device->char_data.idal_buf != NULL) 254 254 idal_buffer_free(device->char_data.idal_buf);
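The dasd_eer.c, tape_char.c and tape_std.c hunks above replace NULL checks with IS_ERR() because dasd_kmalloc_request() and idal_buffer_alloc() report failure as an error code encoded in the returned pointer, not as NULL. A user-space re-sketch of that <linux/err.h> idiom; idal_buffer_alloc_stub() is an invented stand-in that always fails:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* Userspace re-sketch of the <linux/err.h> helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in allocator: fails with an encoded -ENOMEM, never NULL. */
static void *idal_buffer_alloc_stub(void)
{
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *buf = idal_buffer_alloc_stub();

	/* A plain "if (!buf)" test would wrongly treat this as success. */
	if (IS_ERR(buf)) {
		printf("allocation failed: %ld\n", PTR_ERR(buf));
		return 1;
	}
	return 0;
}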
+12 -8
drivers/s390/cio/ccwgroup.c
··· 112 112 gdev = to_ccwgroupdev(dev); 113 113 114 114 for (i = 0; i < gdev->count; i++) { 115 - dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 116 - put_device(&gdev->cdev[i]->dev); 115 + if (gdev->cdev[i]) { 116 + dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 117 + put_device(&gdev->cdev[i]->dev); 118 + } 117 119 } 118 120 kfree(gdev); 119 121 } ··· 223 221 atomic_set(&gdev->onoff, 0); 224 222 mutex_init(&gdev->reg_mutex); 225 223 mutex_lock(&gdev->reg_mutex); 224 + gdev->creator_id = creator_id; 225 + gdev->count = num_devices; 226 + gdev->dev.bus = &ccwgroup_bus_type; 227 + gdev->dev.parent = root; 228 + gdev->dev.release = ccwgroup_release; 229 + device_initialize(&gdev->dev); 230 + 226 231 curr_buf = buf; 227 232 for (i = 0; i < num_devices && curr_buf; i++) { 228 233 rc = __get_next_bus_id(&curr_buf, tmp_bus_id); ··· 267 258 rc = -EINVAL; 268 259 goto error; 269 260 } 270 - gdev->creator_id = creator_id; 271 - gdev->count = num_devices; 272 - gdev->dev.bus = &ccwgroup_bus_type; 273 - gdev->dev.parent = root; 274 - gdev->dev.release = ccwgroup_release; 275 261 276 262 snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", 277 263 gdev->cdev[0]->dev.bus_id); 278 264 279 - rc = device_register(&gdev->dev); 265 + rc = device_add(&gdev->dev); 280 266 if (rc) 281 267 goto error; 282 268 get_device(&gdev->dev);
-1
drivers/s390/cio/css.c
··· 477 477 478 478 void css_wait_for_slow_path(void) 479 479 { 480 - flush_workqueue(ccw_device_notify_work); 481 480 flush_workqueue(slow_path_wq); 482 481 } 483 482
+20 -20
drivers/s390/cio/device.c
··· 150 150 }; 151 151 152 152 struct workqueue_struct *ccw_device_work; 153 - struct workqueue_struct *ccw_device_notify_work; 154 153 wait_queue_head_t ccw_device_init_wq; 155 154 atomic_t ccw_device_init_count; 156 155 ··· 167 168 ccw_device_work = create_singlethread_workqueue("cio"); 168 169 if (!ccw_device_work) 169 170 return -ENOMEM; /* FIXME: better errno ? */ 170 - ccw_device_notify_work = create_singlethread_workqueue("cio_notify"); 171 - if (!ccw_device_notify_work) { 172 - ret = -ENOMEM; /* FIXME: better errno ? */ 173 - goto out_err; 174 - } 175 171 slow_path_wq = create_singlethread_workqueue("kslowcrw"); 176 172 if (!slow_path_wq) { 177 173 ret = -ENOMEM; /* FIXME: better errno ? */ ··· 186 192 out_err: 187 193 if (ccw_device_work) 188 194 destroy_workqueue(ccw_device_work); 189 - if (ccw_device_notify_work) 190 - destroy_workqueue(ccw_device_notify_work); 191 195 if (slow_path_wq) 192 196 destroy_workqueue(slow_path_wq); 193 197 return ret; ··· 196 204 { 197 205 css_driver_unregister(&io_subchannel_driver); 198 206 bus_unregister(&ccw_bus_type); 199 - destroy_workqueue(ccw_device_notify_work); 200 207 destroy_workqueue(ccw_device_work); 201 208 } 202 209 ··· 1487 1496 ccw_device_schedule_recovery(); 1488 1497 } 1489 1498 1499 + void ccw_device_set_notoper(struct ccw_device *cdev) 1500 + { 1501 + struct subchannel *sch = to_subchannel(cdev->dev.parent); 1502 + 1503 + CIO_TRACE_EVENT(2, "notoper"); 1504 + CIO_TRACE_EVENT(2, sch->dev.bus_id); 1505 + ccw_device_set_timeout(cdev, 0); 1506 + cio_disable_subchannel(sch); 1507 + cdev->private->state = DEV_STATE_NOT_OPER; 1508 + } 1509 + 1490 1510 static int io_subchannel_sch_event(struct subchannel *sch, int slow) 1491 1511 { 1492 1512 int event, ret, disc; 1493 1513 unsigned long flags; 1494 - enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; 1514 + enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action; 1495 1515 struct ccw_device *cdev; 1496 1516 1497 1517 spin_lock_irqsave(sch->lock, flags); ··· 1537 1535 } 1538 1536 /* fall through */ 1539 1537 case CIO_GONE: 1540 - /* Prevent unwanted effects when opening lock. */ 1541 - cio_disable_subchannel(sch); 1542 - device_set_disconnected(cdev); 1543 1538 /* Ask driver what to do with device. */ 1544 - action = UNREGISTER; 1545 - spin_unlock_irqrestore(sch->lock, flags); 1546 - ret = io_subchannel_notify(sch, event); 1547 - spin_lock_irqsave(sch->lock, flags); 1548 - if (ret) 1549 - action = NONE; 1539 + if (io_subchannel_notify(sch, event)) 1540 + action = DISC; 1541 + else 1542 + action = UNREGISTER; 1550 1543 break; 1551 1544 case CIO_REVALIDATE: 1552 1545 /* Device will be removed, so no notify necessary. */ ··· 1562 1565 switch (action) { 1563 1566 case UNREGISTER: 1564 1567 case UNREGISTER_PROBE: 1568 + ccw_device_set_notoper(cdev); 1565 1569 /* Unregister device (will use subchannel lock). */ 1566 1570 spin_unlock_irqrestore(sch->lock, flags); 1567 1571 css_sch_device_unregister(sch); ··· 1574 1576 break; 1575 1577 case REPROBE: 1576 1578 ccw_device_trigger_reprobe(cdev); 1579 + break; 1580 + case DISC: 1581 + device_set_disconnected(cdev); 1577 1582 break; 1578 1583 default: 1579 1584 break; ··· 1829 1828 EXPORT_SYMBOL(get_ccwdev_by_busid); 1830 1829 EXPORT_SYMBOL(ccw_bus_type); 1831 1830 EXPORT_SYMBOL(ccw_device_work); 1832 - EXPORT_SYMBOL(ccw_device_notify_work); 1833 1831 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
+1 -1
drivers/s390/cio/device.h
··· 72 72 } 73 73 74 74 extern struct workqueue_struct *ccw_device_work; 75 - extern struct workqueue_struct *ccw_device_notify_work; 76 75 extern wait_queue_head_t ccw_device_init_wq; 77 76 extern atomic_t ccw_device_init_count; 78 77 ··· 119 120 void ccw_device_trigger_reprobe(struct ccw_device *); 120 121 void ccw_device_kill_io(struct ccw_device *); 121 122 int ccw_device_notify(struct ccw_device *, int); 123 + void ccw_device_set_notoper(struct ccw_device *cdev); 122 124 123 125 /* qdio needs this. */ 124 126 void ccw_device_set_timeout(struct ccw_device *, int);
+19 -12
drivers/s390/cio/device_fsm.c
··· 337 337 return 0; 338 338 if (!cdev->online) 339 339 return 0; 340 + CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", 341 + cdev->private->dev_id.ssid, cdev->private->dev_id.devno, 342 + event); 340 343 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 341 344 } 342 345 343 - static void 344 - ccw_device_oper_notify(struct work_struct *work) 346 + static void cmf_reenable_delayed(struct work_struct *work) 345 347 { 346 348 struct ccw_device_private *priv; 347 349 struct ccw_device *cdev; 348 - int ret; 349 350 350 351 priv = container_of(work, struct ccw_device_private, kick_work); 351 352 cdev = priv->cdev; 352 - ret = ccw_device_notify(cdev, CIO_OPER); 353 - if (ret) { 353 + cmf_reenable(cdev); 354 + } 355 + 356 + static void ccw_device_oper_notify(struct ccw_device *cdev) 357 + { 358 + if (ccw_device_notify(cdev, CIO_OPER)) { 354 359 /* Reenable channel measurements, if needed. */ 355 - cmf_reenable(cdev); 356 - wake_up(&cdev->private->wait_q); 357 - } else 358 - /* Driver doesn't want device back. */ 359 - ccw_device_do_unreg_rereg(work); 360 + PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed); 361 + queue_work(ccw_device_work, &cdev->private->kick_work); 362 + return; 363 + } 364 + /* Driver doesn't want device back. */ 365 + ccw_device_set_notoper(cdev); 366 + PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg); 367 + queue_work(ccw_device_work, &cdev->private->kick_work); 360 368 } 361 369 362 370 /* ··· 394 386 395 387 if (cdev->private->flags.donotify) { 396 388 cdev->private->flags.donotify = 0; 397 - PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify); 398 - queue_work(ccw_device_notify_work, &cdev->private->kick_work); 389 + ccw_device_oper_notify(cdev); 399 390 } 400 391 wake_up(&cdev->private->wait_q); 401 392
+3 -3
drivers/s390/cio/qdio_debug.h
··· 61 61 62 62 /* s390dbf views */ 63 63 #define QDIO_DBF_SETUP_LEN 8 64 - #define QDIO_DBF_SETUP_PAGES 4 64 + #define QDIO_DBF_SETUP_PAGES 8 65 65 #define QDIO_DBF_SETUP_NR_AREAS 1 66 66 67 67 #define QDIO_DBF_TRACE_LEN 8 68 68 #define QDIO_DBF_TRACE_NR_AREAS 2 69 69 70 70 #ifdef CONFIG_QDIO_DEBUG 71 - #define QDIO_DBF_TRACE_PAGES 16 71 + #define QDIO_DBF_TRACE_PAGES 32 72 72 #define QDIO_DBF_SETUP_LEVEL 6 73 73 #define QDIO_DBF_TRACE_LEVEL 4 74 74 #else /* !CONFIG_QDIO_DEBUG */ 75 - #define QDIO_DBF_TRACE_PAGES 4 75 + #define QDIO_DBF_TRACE_PAGES 8 76 76 #define QDIO_DBF_SETUP_LEVEL 2 77 77 #define QDIO_DBF_TRACE_LEVEL 2 78 78 #endif /* CONFIG_QDIO_DEBUG */
+34 -40
drivers/s390/cio/qdio_main.c
··· 330 330 int cc; 331 331 u32 busy_bit; 332 332 u64 start_time = 0; 333 + char dbf_text[15]; 333 334 334 335 QDIO_DBF_TEXT5(0, trace, "sigaout"); 335 336 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); ··· 339 338 again: 340 339 cc = qdio_do_siga_output(q, &busy_bit); 341 340 if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { 341 + sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); 342 + QDIO_DBF_TEXT3(0, trace, dbf_text); 343 + 342 344 if (!start_time) 343 345 start_time = get_usecs(); 344 346 else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) ··· 752 748 rc = qdio_siga_output(q); 753 749 switch (rc) { 754 750 case 0: 755 - /* went smooth this time, reset timestamp */ 756 - q->u.out.timestamp = 0; 757 - 758 751 /* TODO: improve error handling for CC=0 case */ 759 752 #ifdef CONFIG_QDIO_DEBUG 760 - QDIO_DBF_TEXT3(0, trace, "cc2reslv"); 761 - sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, 762 - atomic_read(&q->u.out.busy_siga_counter)); 763 - QDIO_DBF_TEXT3(0, trace, dbf_text); 753 + if (q->u.out.timestamp) { 754 + QDIO_DBF_TEXT3(0, trace, "cc2reslv"); 755 + sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, 756 + q->nr, 757 + atomic_read(&q->u.out.busy_siga_counter)); 758 + QDIO_DBF_TEXT3(0, trace, dbf_text); 759 + } 764 760 #endif /* CONFIG_QDIO_DEBUG */ 761 + /* went smooth this time, reset timestamp */ 762 + q->u.out.timestamp = 0; 765 763 break; 766 764 /* cc=2 and busy bit */ 767 765 case (2 | QDIO_ERROR_SIGA_BUSY): ··· 1072 1066 if (IS_ERR(irb)) { 1073 1067 switch (PTR_ERR(irb)) { 1074 1068 case -EIO: 1075 - sprintf(dbf_text, "ierr%4x", 1076 - cdev->private->schid.sch_no); 1069 + sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); 1077 1070 QDIO_DBF_TEXT2(1, setup, dbf_text); 1078 1071 qdio_int_error(cdev); 1079 1072 return; 1080 1073 case -ETIMEDOUT: 1081 - sprintf(dbf_text, "qtoh%4x", 1082 - cdev->private->schid.sch_no); 1074 + sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); 1083 1075 QDIO_DBF_TEXT2(1, setup, dbf_text); 1084 1076 qdio_int_error(cdev); 1085 1077 return; ··· 1128 1124 struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) 1129 1125 { 1130 1126 struct qdio_irq *irq_ptr; 1127 + char dbf_text[15]; 1131 1128 1132 - QDIO_DBF_TEXT0(0, setup, "getssqd"); 1129 + sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); 1130 + QDIO_DBF_TEXT0(0, setup, dbf_text); 1133 1131 1134 1132 irq_ptr = cdev->private->qdio_data; 1135 1133 if (!irq_ptr) ··· 1155 1149 char dbf_text[15]; 1156 1150 int rc; 1157 1151 1152 + sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); 1153 + QDIO_DBF_TEXT0(0, setup, dbf_text); 1154 + 1158 1155 irq_ptr = cdev->private->qdio_data; 1159 1156 if (!irq_ptr) 1160 1157 return -ENODEV; 1161 - 1162 - sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no); 1163 - QDIO_DBF_TEXT1(0, trace, dbf_text); 1164 - QDIO_DBF_TEXT0(0, setup, dbf_text); 1165 1158 1166 1159 rc = qdio_shutdown(cdev, how); 1167 1160 if (rc == 0) ··· 1196 1191 unsigned long flags; 1197 1192 char dbf_text[15]; 1198 1193 1194 + sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); 1195 + QDIO_DBF_TEXT0(0, setup, dbf_text); 1196 + 1199 1197 irq_ptr = cdev->private->qdio_data; 1200 1198 if (!irq_ptr) 1201 1199 return -ENODEV; ··· 1212 1204 mutex_unlock(&irq_ptr->setup_mutex); 1213 1205 return 0; 1214 1206 } 1215 - 1216 - sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no); 1217 - QDIO_DBF_TEXT1(0, trace, dbf_text); 1218 - QDIO_DBF_TEXT0(0, setup, dbf_text); 1219 1207 1220 1208 tiqdio_remove_input_queues(irq_ptr); 
1221 1209 qdio_shutdown_queues(cdev); ··· 1251 1247 1252 1248 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1253 1249 mutex_unlock(&irq_ptr->setup_mutex); 1254 - module_put(THIS_MODULE); 1255 1250 if (rc) 1256 1251 return rc; 1257 1252 return 0; ··· 1266 1263 struct qdio_irq *irq_ptr; 1267 1264 char dbf_text[15]; 1268 1265 1266 + sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); 1267 + QDIO_DBF_TEXT0(0, setup, dbf_text); 1268 + 1269 1269 irq_ptr = cdev->private->qdio_data; 1270 1270 if (!irq_ptr) 1271 1271 return -ENODEV; 1272 1272 1273 1273 mutex_lock(&irq_ptr->setup_mutex); 1274 - 1275 - sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no); 1276 - QDIO_DBF_TEXT1(0, trace, dbf_text); 1277 - QDIO_DBF_TEXT0(0, setup, dbf_text); 1278 - 1279 1274 cdev->private->qdio_data = NULL; 1280 1275 mutex_unlock(&irq_ptr->setup_mutex); 1281 1276 ··· 1296 1295 1297 1296 sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); 1298 1297 QDIO_DBF_TEXT0(0, setup, dbf_text); 1299 - QDIO_DBF_TEXT0(0, trace, dbf_text); 1300 1298 1301 1299 rc = qdio_allocate(init_data); 1302 1300 if (rc) ··· 1319 1319 1320 1320 sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); 1321 1321 QDIO_DBF_TEXT0(0, setup, dbf_text); 1322 - QDIO_DBF_TEXT0(0, trace, dbf_text); 1323 1322 1324 1323 if ((init_data->no_input_qs && !init_data->input_handler) || 1325 1324 (init_data->no_output_qs && !init_data->output_handler)) ··· 1388 1389 unsigned long saveflags; 1389 1390 int rc; 1390 1391 1392 + sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); 1393 + QDIO_DBF_TEXT0(0, setup, dbf_text); 1394 + 1391 1395 irq_ptr = cdev->private->qdio_data; 1392 1396 if (!irq_ptr) 1393 1397 return -ENODEV; 1394 1398 1395 1399 if (cdev->private->state != DEV_STATE_ONLINE) 1396 1400 return -EINVAL; 1397 - 1398 - if (!try_module_get(THIS_MODULE)) 1399 - return -EINVAL; 1400 - 1401 - sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); 1402 - QDIO_DBF_TEXT0(0, setup, dbf_text); 1403 - QDIO_DBF_TEXT0(0, trace, dbf_text); 1404 1401 1405 1402 mutex_lock(&irq_ptr->setup_mutex); 1406 1403 qdio_setup_irq(init_data); ··· 1467 1472 unsigned long saveflags; 1468 1473 char dbf_text[20]; 1469 1474 1475 + sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); 1476 + QDIO_DBF_TEXT0(0, setup, dbf_text); 1477 + 1470 1478 irq_ptr = cdev->private->qdio_data; 1471 1479 if (!irq_ptr) 1472 1480 return -ENODEV; ··· 1482 1484 rc = -EBUSY; 1483 1485 goto out; 1484 1486 } 1485 - 1486 - sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no); 1487 - QDIO_DBF_TEXT2(0, setup, dbf_text); 1488 - QDIO_DBF_TEXT2(0, trace, dbf_text); 1489 1487 1490 1488 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; 1491 1489 irq_ptr->ccw.flags = CCW_FLAG_SLI; ··· 1657 1663 #ifdef CONFIG_QDIO_DEBUG 1658 1664 char dbf_text[20]; 1659 1665 1660 - sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no); 1666 + sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); 1661 1667 QDIO_DBF_TEXT3(0, trace, dbf_text); 1662 1668 #endif /* CONFIG_QDIO_DEBUG */ 1663 1669
+3 -3
drivers/s390/cio/qdio_setup.c
··· 165 165 void **output_sbal_array = qdio_init->output_sbal_addr_array; 166 166 int i; 167 167 168 - sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no); 168 + sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no); 169 169 QDIO_DBF_TEXT0(0, setup, dbf_text); 170 170 171 171 for_each_input_queue(irq_ptr, q, i) { ··· 285 285 rc = __get_ssqd_info(irq_ptr); 286 286 if (rc) { 287 287 QDIO_DBF_TEXT2(0, setup, "ssqdasig"); 288 - sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no); 288 + sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no); 289 289 QDIO_DBF_TEXT2(0, setup, dbf_text); 290 290 sprintf(dbf_text, "rc:%d", rc); 291 291 QDIO_DBF_TEXT2(0, setup, dbf_text); ··· 447 447 { 448 448 char s[80]; 449 449 450 - sprintf(s, "%s ", cdev->dev.bus_id); 450 + sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); 451 451 452 452 switch (irq_ptr->qib.qfmt) { 453 453 case QDIO_QETH_QFMT:
+5 -1
drivers/s390/cio/qdio_thinint.c
··· 113 113 struct qdio_q *q; 114 114 int i; 115 115 116 - for_each_input_queue(irq_ptr, q, i) { 116 + for (i = 0; i < irq_ptr->nr_input_qs; i++) { 117 + q = irq_ptr->input_qs[i]; 118 + /* if establish triggered an error */ 119 + if (!q || !q->entry.prev || !q->entry.next) 120 + continue; 117 121 list_del_rcu(&q->entry); 118 122 synchronize_rcu(); 119 123 }
-1
drivers/s390/net/ctcm_mpc.c
··· 19 19 #undef DEBUGDATA 20 20 #undef DEBUGCCW 21 21 22 - #include <linux/version.h> 23 22 #include <linux/module.h> 24 23 #include <linux/init.h> 25 24 #include <linux/kernel.h>
+2 -1
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 376 376 if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || 377 377 inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') 378 378 return SCSI_DH_NOSYS; 379 - h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun); 379 + h->lun = inqp->lun[7]; /* Uses only the last byte */ 380 380 } 381 381 return err; 382 382 } ··· 386 386 int err; 387 387 struct c9_inquiry *inqp; 388 388 389 + h->lun_state = RDAC_LUN_UNOWNED; 389 390 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); 390 391 if (err == SCSI_DH_OK) { 391 392 inqp = &h->inq.c9;
-1
drivers/scsi/dpt/dpti_i2o.h
··· 21 21 22 22 #include <linux/i2o-dev.h> 23 23 24 - #include <linux/version.h> 25 24 #include <linux/notifier.h> 26 25 #include <asm/atomic.h> 27 26
+22 -15
drivers/scsi/ibmvscsi/ibmvfc.c
··· 556 556 /** 557 557 * ibmvfc_init_host - Start host initialization 558 558 * @vhost: ibmvfc host struct 559 + * @relogin: is this a re-login? 559 560 * 560 561 * Return value: 561 562 * nothing 562 563 **/ 563 - static void ibmvfc_init_host(struct ibmvfc_host *vhost) 564 + static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) 564 565 { 565 566 struct ibmvfc_target *tgt; 566 567 ··· 575 574 } 576 575 577 576 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { 577 + if (!relogin) { 578 + memset(vhost->async_crq.msgs, 0, PAGE_SIZE); 579 + vhost->async_crq.cur = 0; 580 + } 581 + 578 582 list_for_each_entry(tgt, &vhost->targets, queue) 579 583 tgt->need_login = 1; 580 584 scsi_block_requests(vhost->host); ··· 1065 1059 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost) 1066 1060 { 1067 1061 long timeout = wait_event_timeout(vhost->init_wait_q, 1068 - (vhost->state == IBMVFC_ACTIVE || 1069 - vhost->state == IBMVFC_HOST_OFFLINE || 1070 - vhost->state == IBMVFC_LINK_DEAD), 1062 + ((vhost->state == IBMVFC_ACTIVE || 1063 + vhost->state == IBMVFC_HOST_OFFLINE || 1064 + vhost->state == IBMVFC_LINK_DEAD) && 1065 + vhost->action == IBMVFC_HOST_ACTION_NONE), 1071 1066 (init_timeout * HZ)); 1072 1067 1073 1068 return timeout ? 0 : -EIO; ··· 1457 1450 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; 1458 1451 struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; 1459 1452 struct scsi_cmnd *cmnd = evt->cmnd; 1460 - int rsp_len = 0; 1461 - int sense_len = rsp->fcp_sense_len; 1453 + u32 rsp_len = 0; 1454 + u32 sense_len = rsp->fcp_sense_len; 1462 1455 1463 1456 if (cmnd) { 1464 1457 if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID) ··· 1475 1468 rsp_len = rsp->fcp_rsp_len; 1476 1469 if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) 1477 1470 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; 1478 - if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len) 1471 + if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1479 1472 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1480 1473 1481 1474 ibmvfc_log_error(evt); ··· 2084 2077 { 2085 2078 const char *desc = ibmvfc_get_ae_desc(crq->event); 2086 2079 2087 - ibmvfc_log(vhost, 3, "%s event received\n", desc); 2080 + ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx," 2081 + " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2088 2082 2089 2083 switch (crq->event) { 2090 2084 case IBMVFC_AE_LINK_UP: 2091 2085 case IBMVFC_AE_RESUME: 2092 2086 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2093 - ibmvfc_init_host(vhost); 2087 + ibmvfc_init_host(vhost, 1); 2094 2088 break; 2095 2089 case IBMVFC_AE_SCN_FABRIC: 2096 2090 vhost->events_to_log |= IBMVFC_AE_RSCN; 2097 - ibmvfc_init_host(vhost); 2091 + ibmvfc_init_host(vhost, 1); 2098 2092 break; 2099 2093 case IBMVFC_AE_SCN_NPORT: 2100 2094 case IBMVFC_AE_SCN_GROUP: ··· 2141 2133 /* Send back a response */ 2142 2134 rc = ibmvfc_send_crq_init_complete(vhost); 2143 2135 if (rc == 0) 2144 - ibmvfc_init_host(vhost); 2136 + ibmvfc_init_host(vhost, 0); 2145 2137 else 2146 2138 dev_err(vhost->dev, "Unable to send init rsp. 
rc=%ld\n", rc); 2147 2139 break; 2148 2140 case IBMVFC_CRQ_INIT_COMPLETE: 2149 2141 dev_info(vhost->dev, "Partner initialization complete\n"); 2150 - ibmvfc_init_host(vhost); 2142 + ibmvfc_init_host(vhost, 0); 2151 2143 break; 2152 2144 default: 2153 2145 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); ··· 3365 3357 mad->buffer.va = vhost->login_buf_dma; 3366 3358 mad->buffer.len = sizeof(*vhost->login_buf); 3367 3359 3368 - memset(vhost->async_crq.msgs, 0, PAGE_SIZE); 3369 - vhost->async_crq.cur = 0; 3370 3360 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); 3371 3361 3372 3362 if (!ibmvfc_send_event(evt, vhost, default_timeout)) ··· 3607 3601 } 3608 3602 } 3609 3603 3610 - if (vhost->reinit) { 3604 + if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { 3611 3605 vhost->reinit = 0; 3606 + scsi_block_requests(vhost->host); 3612 3607 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); 3613 3608 } else { 3614 3609 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+2 -2
drivers/scsi/ibmvscsi/ibmvfc.h
··· 29 29 #include "viosrp.h" 30 30 31 31 #define IBMVFC_NAME "ibmvfc" 32 - #define IBMVFC_DRIVER_VERSION "1.0.1" 33 - #define IBMVFC_DRIVER_DATE "(July 11, 2008)" 32 + #define IBMVFC_DRIVER_VERSION "1.0.2" 33 + #define IBMVFC_DRIVER_DATE "(August 14, 2008)" 34 34 35 35 #define IBMVFC_DEFAULT_TIMEOUT 15 36 36 #define IBMVFC_INIT_TIMEOUT 30
+1 -1
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 1636 1636 unsigned long desired_io = max_requests * sizeof(union viosrp_iu); 1637 1637 1638 1638 /* add io space for sg data */ 1639 - desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 1639 + desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 * 1640 1640 IBMVSCSI_CMDS_PER_LUN_DEFAULT); 1641 1641 1642 1642 return desired_io;
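The ibmvscsi hunk above is a unit fix: IBMVSCSI_MAX_SECTORS_DEFAULT counts 512-byte sectors, so the scatter/gather share of the desired I/O entitlement has to be scaled to bytes before being added to the viosrp_iu space. A minimal sketch of the corrected arithmetic, reusing the driver's constant names but with the helper itself invented for illustration:

	static unsigned long desired_io_bytes(unsigned long max_requests)
	{
		/* request/response IUs are already sized in bytes */
		unsigned long desired_io = max_requests * sizeof(union viosrp_iu);

		/* sg data: sectors * 512 bytes per sector * commands per LUN */
		desired_io += (unsigned long)IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
			      IBMVSCSI_CMDS_PER_LUN_DEFAULT;

		return desired_io;
	}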
-1
drivers/scsi/ips.c
··· 165 165 #include <asm/byteorder.h> 166 166 #include <asm/page.h> 167 167 #include <linux/stddef.h> 168 - #include <linux/version.h> 169 168 #include <linux/string.h> 170 169 #include <linux/errno.h> 171 170 #include <linux/kernel.h>
-1
drivers/scsi/ips.h
··· 50 50 #ifndef _IPS_H_ 51 51 #define _IPS_H_ 52 52 53 - #include <linux/version.h> 54 53 #include <linux/nmi.h> 55 54 #include <asm/uaccess.h> 56 55 #include <asm/io.h>
-1
drivers/scsi/lpfc/lpfc_debugfs.c
··· 27 27 #include <linux/pci.h> 28 28 #include <linux/spinlock.h> 29 29 #include <linux/ctype.h> 30 - #include <linux/version.h> 31 30 32 31 #include <scsi/scsi.h> 33 32 #include <scsi/scsi_device.h>
+116 -3
drivers/scsi/megaraid/megaraid_sas.c
··· 10 10 * 2 of the License, or (at your option) any later version. 11 11 * 12 12 * FILE : megaraid_sas.c 13 - * Version : v00.00.03.20-rc1 13 + * Version : v00.00.04.01-rc1 14 14 * 15 15 * Authors: 16 16 * (email-id : megaraidlinux@lsi.com) ··· 71 71 /* ppc IOP */ 72 72 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 73 73 /* ppc IOP */ 74 + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, 75 + /* gen2*/ 76 + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 77 + /* gen2*/ 74 78 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 75 79 /* xscale IOP, vega */ 76 80 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, ··· 202 198 */ 203 199 writel(status, &regs->outbound_intr_status); 204 200 201 + /* Dummy readl to force pci flush */ 202 + readl(&regs->outbound_intr_status); 203 + 205 204 return 0; 206 205 } 207 206 ··· 300 293 */ 301 294 writel(status, &regs->outbound_doorbell_clear); 302 295 296 + /* Dummy readl to force pci flush */ 297 + readl(&regs->outbound_doorbell_clear); 298 + 303 299 return 0; 304 300 } 305 301 /** ··· 325 315 .disable_intr = megasas_disable_intr_ppc, 326 316 .clear_intr = megasas_clear_intr_ppc, 327 317 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 318 + }; 319 + 320 + /** 321 + * The following functions are defined for gen2 (deviceid : 0x78 0x79) 322 + * controllers 323 + */ 324 + 325 + /** 326 + * megasas_enable_intr_gen2 - Enables interrupts 327 + * @regs: MFI register set 328 + */ 329 + static inline void 330 + megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs) 331 + { 332 + writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 333 + 334 + /* write ~0x00000005 (4 & 1) to the intr mask*/ 335 + writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 336 + 337 + /* Dummy readl to force pci flush */ 338 + readl(&regs->outbound_intr_mask); 339 + } 340 + 341 + /** 342 + * megasas_disable_intr_gen2 - Disables interrupt 343 + * @regs: MFI register set 344 + */ 345 + static inline void 346 + megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs) 347 + { 348 + u32 mask = 0xFFFFFFFF; 349 + writel(mask, &regs->outbound_intr_mask); 350 + /* Dummy readl to force pci flush */ 351 + readl(&regs->outbound_intr_mask); 352 + } 353 + 354 + /** 355 + * megasas_read_fw_status_reg_gen2 - returns the current FW status value 356 + * @regs: MFI register set 357 + */ 358 + static u32 359 + megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs) 360 + { 361 + return readl(&(regs)->outbound_scratch_pad); 362 + } 363 + 364 + /** 365 + * megasas_clear_interrupt_gen2 - Check & clear interrupt 366 + * @regs: MFI register set 367 + */ 368 + static int 369 + megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) 370 + { 371 + u32 status; 372 + /* 373 + * Check if it is our interrupt 374 + */ 375 + status = readl(&regs->outbound_intr_status); 376 + 377 + if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK)) 378 + return 1; 379 + 380 + /* 381 + * Clear the interrupt by writing back the same value 382 + */ 383 + writel(status, &regs->outbound_doorbell_clear); 384 + 385 + /* Dummy readl to force pci flush */ 386 + readl(&regs->outbound_intr_status); 387 + 388 + return 0; 389 + } 390 + /** 391 + * megasas_fire_cmd_gen2 - Sends command to the FW 392 + * @frame_phys_addr : Physical address of cmd 393 + * @frame_count : Number of frames for the command 394 + * @regs : MFI register set 395 + */ 396 + static inline void 397 + 
megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count, 398 + struct megasas_register_set __iomem *regs) 399 + { 400 + writel((frame_phys_addr | (frame_count<<1))|1, 401 + &(regs)->inbound_queue_port); 402 + } 403 + 404 + static struct megasas_instance_template megasas_instance_template_gen2 = { 405 + 406 + .fire_cmd = megasas_fire_cmd_gen2, 407 + .enable_intr = megasas_enable_intr_gen2, 408 + .disable_intr = megasas_disable_intr_gen2, 409 + .clear_intr = megasas_clear_intr_gen2, 410 + .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 328 411 }; 329 412 330 413 /** ··· 2079 1976 /* 2080 1977 * Map the message registers 2081 1978 */ 2082 - instance->base_addr = pci_resource_start(instance->pdev, 0); 1979 + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || 1980 + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { 1981 + instance->base_addr = pci_resource_start(instance->pdev, 1); 1982 + } else { 1983 + instance->base_addr = pci_resource_start(instance->pdev, 0); 1984 + } 2083 1985 2084 1986 if (pci_request_regions(instance->pdev, "megasas: LSI")) { 2085 1987 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); ··· 2105 1997 case PCI_DEVICE_ID_LSI_SAS1078R: 2106 1998 case PCI_DEVICE_ID_LSI_SAS1078DE: 2107 1999 instance->instancet = &megasas_instance_template_ppc; 2000 + break; 2001 + case PCI_DEVICE_ID_LSI_SAS1078GEN2: 2002 + case PCI_DEVICE_ID_LSI_SAS0079GEN2: 2003 + instance->instancet = &megasas_instance_template_gen2; 2108 2004 break; 2109 2005 case PCI_DEVICE_ID_LSI_SAS1064R: 2110 2006 case PCI_DEVICE_ID_DELL_PERC5: ··· 2969 2857 { 2970 2858 struct megasas_instance *instance = pci_get_drvdata(pdev); 2971 2859 megasas_flush_cache(instance); 2860 + megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 2972 2861 } 2973 2862 2974 2863 /** ··· 3405 3292 return retval; 3406 3293 } 3407 3294 3408 - static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl, 3295 + static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 3409 3296 megasas_sysfs_set_dbg_lvl); 3410 3297 3411 3298 static ssize_t
+7 -3
drivers/scsi/megaraid/megaraid_sas.h
··· 18 18 /* 19 19 * MegaRAID SAS Driver meta data 20 20 */ 21 - #define MEGASAS_VERSION "00.00.03.20-rc1" 22 - #define MEGASAS_RELDATE "March 10, 2008" 23 - #define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008" 21 + #define MEGASAS_VERSION "00.00.04.01" 22 + #define MEGASAS_RELDATE "July 24, 2008" 23 + #define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008" 24 24 25 25 /* 26 26 * Device IDs ··· 28 28 #define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 29 29 #define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C 30 30 #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 31 + #define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 32 + #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 31 33 32 34 /* 33 35 * ===================================== ··· 582 580 #define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) 583 581 584 582 #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 583 + #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 584 + #define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) 585 585 586 586 /* 587 587 * register set for both 1068 and 1078 controllers
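A detail worth noting in the gen2 helpers above is the dummy readl() after each interrupt-related writel(): PCI MMIO writes are posted, so only a read-back from the device guarantees that the mask update or interrupt clear has actually reached the controller before the code moves on. A minimal sketch of the idiom, with a made-up register offset rather than anything from the megaraid_sas layout:

	#define REG_INTR_CLEAR	0x30	/* hypothetical offset, for illustration only */

	static void ack_irq_and_flush(void __iomem *regs)
	{
		writel(0xFFFFFFFF, regs + REG_INTR_CLEAR);
		(void)readl(regs + REG_INTR_CLEAR);	/* flush the posted write */
	}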
-1
drivers/scsi/nsp32.c
··· 23 23 * 1.2: PowerPC (big endian) support. 24 24 */ 25 25 26 - #include <linux/version.h> 27 26 #include <linux/module.h> 28 27 #include <linux/init.h> 29 28 #include <linux/kernel.h>
-1
drivers/scsi/nsp32.h
··· 16 16 #ifndef _NSP32_H 17 17 #define _NSP32_H 18 18 19 - #include <linux/version.h> 20 19 //#define NSP32_DEBUG 9 21 20 22 21 /*
-1
drivers/scsi/pcmcia/nsp_cs.c
··· 25 25 26 26 ***********************************************************************/ 27 27 28 - #include <linux/version.h> 29 28 #include <linux/module.h> 30 29 #include <linux/kernel.h> 31 30 #include <linux/init.h>
+11
drivers/scsi/qla2xxx/qla_attr.c
··· 993 993 { 994 994 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 995 995 996 + /* 997 + * At this point all fcport's software-states are cleared. Perform any 998 + * final cleanup of firmware resources (PCBs and XCBs). 999 + */ 1000 + if (fcport->loop_id != FC_NO_LOOP_ID) { 1001 + fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id, 1002 + fcport->d_id.b.domain, fcport->d_id.b.area, 1003 + fcport->d_id.b.al_pa); 1004 + fcport->loop_id = FC_NO_LOOP_ID; 1005 + } 1006 + 996 1007 qla2x00_abort_fcport_cmds(fcport); 997 1008 scsi_target_unblock(&rport->dev); 998 1009 }
+1
drivers/scsi/qla2xxx/qla_def.h
··· 2237 2237 #define REGISTER_FDMI_NEEDED 26 2238 2238 #define FCPORT_UPDATE_NEEDED 27 2239 2239 #define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ 2240 + #define UNLOADING 29 2240 2241 2241 2242 uint32_t device_flags; 2242 2243 #define DFLG_LOCAL_DEVICES BIT_0
+6 -2
drivers/scsi/qla2xxx/qla_init.c
··· 976 976 &ha->fw_attributes, &ha->fw_memory_size); 977 977 qla2x00_resize_request_q(ha); 978 978 ha->flags.npiv_supported = 0; 979 - if ((IS_QLA24XX(ha) || IS_QLA25XX(ha)) && 980 - (ha->fw_attributes & BIT_2)) { 979 + if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 980 + IS_QLA84XX(ha)) && 981 + (ha->fw_attributes & BIT_2)) { 981 982 ha->flags.npiv_supported = 1; 982 983 if ((!ha->max_npiv_vports) || 983 984 ((ha->max_npiv_vports + 1) % ··· 3252 3251 { 3253 3252 int rval; 3254 3253 uint8_t status = 0; 3254 + scsi_qla_host_t *vha; 3255 3255 3256 3256 if (ha->flags.online) { 3257 3257 ha->flags.online = 0; ··· 3267 3265 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 3268 3266 atomic_set(&ha->loop_state, LOOP_DOWN); 3269 3267 qla2x00_mark_all_devices_lost(ha, 0); 3268 + list_for_each_entry(vha, &ha->vp_list, vp_list) 3269 + qla2x00_mark_all_devices_lost(vha, 0); 3270 3270 } else { 3271 3271 if (!atomic_read(&ha->loop_down_timer)) 3272 3272 atomic_set(&ha->loop_down_timer,
+7 -7
drivers/scsi/qla2xxx/qla_isr.c
··· 879 879 sp->request_sense_ptr += sense_len; 880 880 sp->request_sense_length -= sense_len; 881 881 if (sp->request_sense_length != 0) 882 - sp->ha->status_srb = sp; 882 + sp->fcport->ha->status_srb = sp; 883 883 884 884 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 885 - "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel, 886 - cp->device->id, cp->device->lun, cp, cp->serial_number)); 885 + "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, 886 + cp->device->channel, cp->device->id, cp->device->lun, cp, 887 + cp->serial_number)); 887 888 if (sense_len) 888 889 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, 889 890 CMD_ACTUAL_SNSLEN(cp))); ··· 1185 1184 atomic_read(&fcport->state))); 1186 1185 1187 1186 cp->result = DID_BUS_BUSY << 16; 1188 - if (atomic_read(&fcport->state) == FCS_ONLINE) { 1189 - qla2x00_mark_device_lost(ha, fcport, 1, 1); 1190 - } 1187 + if (atomic_read(&fcport->state) == FCS_ONLINE) 1188 + qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1191 1189 break; 1192 1190 1193 1191 case CS_RESET: ··· 1229 1229 1230 1230 /* Check to see if logout occurred. */ 1231 1231 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1232 - qla2x00_mark_device_lost(ha, fcport, 1, 1); 1232 + qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1233 1233 break; 1234 1234 1235 1235 default:
+1 -1
drivers/scsi/qla2xxx/qla_mbx.c
··· 2686 2686 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 2687 2687 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 2688 2688 2689 - wake_up_process(ha->dpc_thread); 2689 + qla2xxx_wake_dpc(ha); 2690 2690 } 2691 2691 } 2692 2692
-1
drivers/scsi/qla2xxx/qla_mid.c
··· 6 6 */ 7 7 #include "qla_def.h" 8 8 9 - #include <linux/version.h> 10 9 #include <linux/moduleparam.h> 11 10 #include <linux/vmalloc.h> 12 11 #include <linux/smp_lock.h>
+12 -6
drivers/scsi/qla2xxx/qla_os.c
··· 780 780 sp = pha->outstanding_cmds[cnt]; 781 781 if (!sp) 782 782 continue; 783 - if (ha->vp_idx != sp->ha->vp_idx) 783 + 784 + if (ha->vp_idx != sp->fcport->ha->vp_idx) 784 785 continue; 785 786 match = 0; 786 787 switch (type) { ··· 1081 1080 sp = ha->outstanding_cmds[cnt]; 1082 1081 if (sp) { 1083 1082 ha->outstanding_cmds[cnt] = NULL; 1084 - sp->flags = 0; 1085 1083 sp->cmd->result = res; 1086 - sp->cmd->host_scribble = (unsigned char *)NULL; 1087 1084 qla2x00_sp_compl(ha, sp); 1088 1085 } 1089 1086 } ··· 1775 1776 static void 1776 1777 qla2x00_remove_one(struct pci_dev *pdev) 1777 1778 { 1778 - scsi_qla_host_t *ha; 1779 + scsi_qla_host_t *ha, *vha, *temp; 1779 1780 1780 1781 ha = pci_get_drvdata(pdev); 1782 + 1783 + list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1784 + fc_vport_terminate(vha->fc_vport); 1785 + 1786 + set_bit(UNLOADING, &ha->dpc_flags); 1781 1787 1782 1788 qla2x00_dfs_remove(ha); 1783 1789 ··· 2455 2451 void 2456 2452 qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2457 2453 { 2458 - if (ha->dpc_thread) 2459 - wake_up_process(ha->dpc_thread); 2454 + struct task_struct *t = ha->dpc_thread; 2455 + 2456 + if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2457 + wake_up_process(t); 2460 2458 } 2461 2459 2462 2460 /*
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.02.01-k6" 10 + #define QLA2XXX_VERSION "8.02.01-k7" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 13 #define QLA_DRIVER_MINOR_VER 2
+8
drivers/ssb/main.c
··· 1165 1165 1166 1166 int ssb_dma_set_mask(struct ssb_device *dev, u64 mask) 1167 1167 { 1168 + #ifdef CONFIG_SSB_PCIHOST 1168 1169 int err; 1170 + #endif 1169 1171 1170 1172 switch (dev->bus->bustype) { 1171 1173 case SSB_BUSTYPE_PCI: 1174 + #ifdef CONFIG_SSB_PCIHOST 1172 1175 err = pci_set_dma_mask(dev->bus->host_pci, mask); 1173 1176 if (err) 1174 1177 return err; 1175 1178 err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask); 1176 1179 return err; 1180 + #endif 1177 1181 case SSB_BUSTYPE_SSB: 1178 1182 return dma_set_mask(dev->dev, mask); 1179 1183 default: ··· 1192 1188 { 1193 1189 switch (dev->bus->bustype) { 1194 1190 case SSB_BUSTYPE_PCI: 1191 + #ifdef CONFIG_SSB_PCIHOST 1195 1192 if (gfp_flags & GFP_DMA) { 1196 1193 /* Workaround: The PCI API does not support passing 1197 1194 * a GFP flag. */ ··· 1200 1195 size, dma_handle, gfp_flags); 1201 1196 } 1202 1197 return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle); 1198 + #endif 1203 1199 case SSB_BUSTYPE_SSB: 1204 1200 return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags); 1205 1201 default: ··· 1216 1210 { 1217 1211 switch (dev->bus->bustype) { 1218 1212 case SSB_BUSTYPE_PCI: 1213 + #ifdef CONFIG_SSB_PCIHOST 1219 1214 if (gfp_flags & GFP_DMA) { 1220 1215 /* Workaround: The PCI API does not support passing 1221 1216 * a GFP flag. */ ··· 1227 1220 pci_free_consistent(dev->bus->host_pci, size, 1228 1221 vaddr, dma_handle); 1229 1222 return; 1223 + #endif 1230 1224 case SSB_BUSTYPE_SSB: 1231 1225 dma_free_coherent(dev->dev, size, vaddr, dma_handle); 1232 1226 return;
+13
drivers/uio/Kconfig
··· 33 33 34 34 If you don't know what to do here, say N. 35 35 36 + config UIO_PDRV_GENIRQ 37 + tristate "Userspace I/O platform driver with generic IRQ handling" 38 + help 39 + Platform driver for Userspace I/O devices, including generic 40 + interrupt handling code. Shared interrupts are not supported. 41 + 42 + This kernel driver requires that the matching userspace driver 43 + handles interrupts in a special way. Userspace is responsible 44 + for acknowledging the hardware device if needed, and re-enabling 45 + interrupts in the interrupt controller using the write() syscall. 46 + 47 + If you don't know what to do here, say N. 48 + 36 49 config UIO_SMX 37 50 tristate "SMX cryptengine UIO interface" 38 51 default n
+1
drivers/uio/Makefile
··· 1 1 obj-$(CONFIG_UIO) += uio.o 2 2 obj-$(CONFIG_UIO_CIF) += uio_cif.o 3 3 obj-$(CONFIG_UIO_PDRV) += uio_pdrv.o 4 + obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o 4 5 obj-$(CONFIG_UIO_SMX) += uio_smx.o
+3 -1
drivers/uio/uio_pdrv.c
··· 88 88 89 89 uio_unregister_device(pdata->uioinfo); 90 90 91 + kfree(pdata); 92 + 91 93 return 0; 92 94 } 93 95 ··· 116 114 117 115 MODULE_AUTHOR("Uwe Kleine-Koenig"); 118 116 MODULE_DESCRIPTION("Userspace I/O platform driver"); 119 - MODULE_LICENSE("GPL"); 117 + MODULE_LICENSE("GPL v2"); 120 118 MODULE_ALIAS("platform:" DRIVER_NAME);
+188
drivers/uio/uio_pdrv_genirq.c
··· 1 + /* 2 + * drivers/uio/uio_pdrv_genirq.c 3 + * 4 + * Userspace I/O platform driver with generic IRQ handling code. 5 + * 6 + * Copyright (C) 2008 Magnus Damm 7 + * 8 + * Based on uio_pdrv.c by Uwe Kleine-Koenig, 9 + * Copyright (C) 2008 by Digi International Inc. 10 + * All rights reserved. 11 + * 12 + * This program is free software; you can redistribute it and/or modify it 13 + * under the terms of the GNU General Public License version 2 as published by 14 + * the Free Software Foundation. 15 + */ 16 + 17 + #include <linux/platform_device.h> 18 + #include <linux/uio_driver.h> 19 + #include <linux/spinlock.h> 20 + #include <linux/bitops.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/stringify.h> 23 + 24 + #define DRIVER_NAME "uio_pdrv_genirq" 25 + 26 + struct uio_pdrv_genirq_platdata { 27 + struct uio_info *uioinfo; 28 + spinlock_t lock; 29 + unsigned long flags; 30 + }; 31 + 32 + static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info) 33 + { 34 + struct uio_pdrv_genirq_platdata *priv = dev_info->priv; 35 + 36 + /* Just disable the interrupt in the interrupt controller, and 37 + * remember the state so we can allow user space to enable it later. 38 + */ 39 + 40 + if (!test_and_set_bit(0, &priv->flags)) 41 + disable_irq_nosync(irq); 42 + 43 + return IRQ_HANDLED; 44 + } 45 + 46 + static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) 47 + { 48 + struct uio_pdrv_genirq_platdata *priv = dev_info->priv; 49 + unsigned long flags; 50 + 51 + /* Allow user space to enable and disable the interrupt 52 + * in the interrupt controller, but keep track of the 53 + * state to prevent per-irq depth damage. 54 + * 55 + * Serialize this operation to support multiple tasks. 56 + */ 57 + 58 + spin_lock_irqsave(&priv->lock, flags); 59 + if (irq_on) { 60 + if (test_and_clear_bit(0, &priv->flags)) 61 + enable_irq(dev_info->irq); 62 + } else { 63 + if (!test_and_set_bit(0, &priv->flags)) 64 + disable_irq(dev_info->irq); 65 + } 66 + spin_unlock_irqrestore(&priv->lock, flags); 67 + 68 + return 0; 69 + } 70 + 71 + static int uio_pdrv_genirq_probe(struct platform_device *pdev) 72 + { 73 + struct uio_info *uioinfo = pdev->dev.platform_data; 74 + struct uio_pdrv_genirq_platdata *priv; 75 + struct uio_mem *uiomem; 76 + int ret = -EINVAL; 77 + int i; 78 + 79 + if (!uioinfo || !uioinfo->name || !uioinfo->version) { 80 + dev_err(&pdev->dev, "missing platform_data\n"); 81 + goto bad0; 82 + } 83 + 84 + if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags) { 85 + dev_err(&pdev->dev, "interrupt configuration error\n"); 86 + goto bad0; 87 + } 88 + 89 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 90 + if (!priv) { 91 + ret = -ENOMEM; 92 + dev_err(&pdev->dev, "unable to kmalloc\n"); 93 + goto bad0; 94 + } 95 + 96 + priv->uioinfo = uioinfo; 97 + spin_lock_init(&priv->lock); 98 + priv->flags = 0; /* interrupt is enabled to begin with */ 99 + 100 + uiomem = &uioinfo->mem[0]; 101 + 102 + for (i = 0; i < pdev->num_resources; ++i) { 103 + struct resource *r = &pdev->resource[i]; 104 + 105 + if (r->flags != IORESOURCE_MEM) 106 + continue; 107 + 108 + if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { 109 + dev_warn(&pdev->dev, "device has more than " 110 + __stringify(MAX_UIO_MAPS) 111 + " I/O memory resources.\n"); 112 + break; 113 + } 114 + 115 + uiomem->memtype = UIO_MEM_PHYS; 116 + uiomem->addr = r->start; 117 + uiomem->size = r->end - r->start + 1; 118 + ++uiomem; 119 + } 120 + 121 + while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { 122 + uiomem->size = 0; 123 + 
++uiomem; 124 + } 125 + 126 + /* This driver requires no hardware specific kernel code to handle 127 + * interrupts. Instead, the interrupt handler simply disables the 128 + * interrupt in the interrupt controller. User space is responsible 129 + * for performing hardware specific acknowledge and re-enabling of 130 + * the interrupt in the interrupt controller. 131 + * 132 + * Interrupt sharing is not supported. 133 + */ 134 + 135 + uioinfo->irq_flags = IRQF_DISABLED; 136 + uioinfo->handler = uio_pdrv_genirq_handler; 137 + uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol; 138 + uioinfo->priv = priv; 139 + 140 + ret = uio_register_device(&pdev->dev, priv->uioinfo); 141 + if (ret) { 142 + dev_err(&pdev->dev, "unable to register uio device\n"); 143 + goto bad1; 144 + } 145 + 146 + platform_set_drvdata(pdev, priv); 147 + return 0; 148 + bad1: 149 + kfree(priv); 150 + bad0: 151 + return ret; 152 + } 153 + 154 + static int uio_pdrv_genirq_remove(struct platform_device *pdev) 155 + { 156 + struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev); 157 + 158 + uio_unregister_device(priv->uioinfo); 159 + kfree(priv); 160 + return 0; 161 + } 162 + 163 + static struct platform_driver uio_pdrv_genirq = { 164 + .probe = uio_pdrv_genirq_probe, 165 + .remove = uio_pdrv_genirq_remove, 166 + .driver = { 167 + .name = DRIVER_NAME, 168 + .owner = THIS_MODULE, 169 + }, 170 + }; 171 + 172 + static int __init uio_pdrv_genirq_init(void) 173 + { 174 + return platform_driver_register(&uio_pdrv_genirq); 175 + } 176 + 177 + static void __exit uio_pdrv_genirq_exit(void) 178 + { 179 + platform_driver_unregister(&uio_pdrv_genirq); 180 + } 181 + 182 + module_init(uio_pdrv_genirq_init); 183 + module_exit(uio_pdrv_genirq_exit); 184 + 185 + MODULE_AUTHOR("Magnus Damm"); 186 + MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling"); 187 + MODULE_LICENSE("GPL v2"); 188 + MODULE_ALIAS("platform:" DRIVER_NAME);
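The new uio_pdrv_genirq driver leaves the interrupt contract to userspace, as its Kconfig text above says: the kernel handler only masks the IRQ, and the owning process must acknowledge the hardware and then re-enable the interrupt with write(). A small userspace sketch of that loop, assuming the device shows up as /dev/uio0 and relying on the standard UIO semantics (read() blocks and returns a 32-bit event count, writing a 32-bit 1 invokes the driver's irqcontrol hook):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/uio0", O_RDWR);	/* assumed device node */
		uint32_t count, enable = 1;

		if (fd < 0)
			return 1;

		while (read(fd, &count, sizeof(count)) == sizeof(count)) {
			/* IRQ fired and was masked by uio_pdrv_genirq_handler() */
			printf("interrupt #%u\n", (unsigned int)count);

			/* device-specific hardware acknowledge would go here */

			/* unmask again through uio_pdrv_genirq_irqcontrol() */
			if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
				break;
		}

		close(fd);
		return 0;
	}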
-1
drivers/usb/atm/ueagle-atm.c
··· 64 64 #include <linux/ctype.h> 65 65 #include <linux/sched.h> 66 66 #include <linux/kthread.h> 67 - #include <linux/version.h> 68 67 #include <linux/mutex.h> 69 68 #include <linux/freezer.h> 70 69
+4 -1
drivers/usb/class/cdc-acm.c
··· 589 589 tasklet_schedule(&acm->urb_task); 590 590 591 591 done: 592 - err_out: 593 592 mutex_unlock(&acm->mutex); 593 + err_out: 594 594 mutex_unlock(&open_mutex); 595 595 return rv; 596 596 ··· 1360 1360 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1361 1361 }, 1362 1362 { USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */ 1363 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1364 + }, 1365 + { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */ 1363 1366 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1364 1367 }, 1365 1368
+49 -47
drivers/usb/core/driver.c
··· 230 230 */ 231 231 intf->pm_usage_cnt = !(driver->supports_autosuspend); 232 232 233 + /* Carry out a deferred switch to altsetting 0 */ 234 + if (intf->needs_altsetting0) { 235 + usb_set_interface(udev, intf->altsetting[0]. 236 + desc.bInterfaceNumber, 0); 237 + intf->needs_altsetting0 = 0; 238 + } 239 + 233 240 error = driver->probe(intf, id); 234 241 if (error) { 235 242 mark_quiesced(intf); ··· 273 266 274 267 driver->disconnect(intf); 275 268 276 - /* reset other interface state */ 277 - usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0); 269 + /* Reset other interface state. 270 + * We cannot do a Set-Interface if the device is suspended or 271 + * if it is prepared for a system sleep (since installing a new 272 + * altsetting means creating new endpoint device entries). 273 + * When either of these happens, defer the Set-Interface. 274 + */ 275 + if (!error && intf->dev.power.status == DPM_ON) 276 + usb_set_interface(udev, intf->altsetting[0]. 277 + desc.bInterfaceNumber, 0); 278 + else 279 + intf->needs_altsetting0 = 1; 278 280 usb_set_intfdata(intf, NULL); 279 281 280 282 intf->condition = USB_INTERFACE_UNBOUND; ··· 814 798 * The caller must hold @intf's device's lock, but not its pm_mutex 815 799 * and not @intf->dev.sem. 816 800 * 817 - * FIXME: The caller must block system sleep transitions. 801 + * Note: Rebinds will be skipped if a system sleep transition is in 802 + * progress and the PM "complete" callback hasn't occurred yet. 818 803 */ 819 804 void usb_rebind_intf(struct usb_interface *intf) 820 805 { ··· 831 814 } 832 815 833 816 /* Try to rebind the interface */ 834 - intf->needs_binding = 0; 835 - rc = device_attach(&intf->dev); 836 - if (rc < 0) 837 - dev_warn(&intf->dev, "rebind failed: %d\n", rc); 817 + if (intf->dev.power.status == DPM_ON) { 818 + intf->needs_binding = 0; 819 + rc = device_attach(&intf->dev); 820 + if (rc < 0) 821 + dev_warn(&intf->dev, "rebind failed: %d\n", rc); 822 + } 838 823 } 839 824 840 825 #ifdef CONFIG_PM ··· 848 829 * or rebind interfaces that have been unbound, according to @action. 849 830 * 850 831 * The caller must hold @udev's device lock. 851 - * FIXME: For rebinds, the caller must block system sleep transitions. 852 832 */ 853 833 static void do_unbind_rebind(struct usb_device *udev, int action) 854 834 { ··· 869 851 } 870 852 break; 871 853 case DO_REBIND: 872 - if (intf->needs_binding) { 873 - 874 - /* FIXME: The next line is needed because we are going to probe 875 - * the interface, but as far as the PM core is concerned the 876 - * interface is still suspended. The problem wouldn't exist 877 - * if we could rebind the interface during the interface's own 878 - * resume() call, but at the time the usb_device isn't locked! 879 - * 880 - * The real solution will be to carry this out during the device's 881 - * complete() callback. Until that is implemented, we have to 882 - * use this hack. 
883 - */ 884 - // intf->dev.power.sleeping = 0; 885 - 854 + if (intf->needs_binding) 886 855 usb_rebind_intf(intf); 887 - } 888 856 break; 889 857 } 890 858 } ··· 930 926 } 931 927 932 928 /* Caller has locked intf's usb_device's pm mutex */ 933 - static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg) 929 + static int usb_suspend_interface(struct usb_device *udev, 930 + struct usb_interface *intf, pm_message_t msg) 934 931 { 935 932 struct usb_driver *driver; 936 933 int status = 0; 937 934 938 935 /* with no hardware, USB interfaces only use FREEZE and ON states */ 939 - if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED || 940 - !is_active(intf)) 936 + if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf)) 941 937 goto done; 942 938 943 939 if (intf->condition == USB_INTERFACE_UNBOUND) /* This can't happen */ ··· 948 944 status = driver->suspend(intf, msg); 949 945 if (status == 0) 950 946 mark_quiesced(intf); 951 - else if (!interface_to_usbdev(intf)->auto_pm) 947 + else if (!udev->auto_pm) 952 948 dev_err(&intf->dev, "%s error %d\n", 953 949 "suspend", status); 954 950 } else { ··· 965 961 } 966 962 967 963 /* Caller has locked intf's usb_device's pm_mutex */ 968 - static int usb_resume_interface(struct usb_interface *intf, int reset_resume) 964 + static int usb_resume_interface(struct usb_device *udev, 965 + struct usb_interface *intf, int reset_resume) 969 966 { 970 967 struct usb_driver *driver; 971 968 int status = 0; 972 969 973 - if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED || 974 - is_active(intf)) 970 + if (udev->state == USB_STATE_NOTATTACHED || is_active(intf)) 975 971 goto done; 976 972 977 973 /* Don't let autoresume interfere with unbinding */ ··· 979 975 goto done; 980 976 981 977 /* Can't resume it if it doesn't have a driver. */ 982 - if (intf->condition == USB_INTERFACE_UNBOUND) 978 + if (intf->condition == USB_INTERFACE_UNBOUND) { 979 + 980 + /* Carry out a deferred switch to altsetting 0 */ 981 + if (intf->needs_altsetting0 && 982 + intf->dev.power.status == DPM_ON) { 983 + usb_set_interface(udev, intf->altsetting[0]. 
984 + desc.bInterfaceNumber, 0); 985 + intf->needs_altsetting0 = 0; 986 + } 983 987 goto done; 988 + } 984 989 985 990 /* Don't resume if the interface is marked for rebinding */ 986 991 if (intf->needs_binding) ··· 1164 1151 if (udev->actconfig) { 1165 1152 for (; i < udev->actconfig->desc.bNumInterfaces; i++) { 1166 1153 intf = udev->actconfig->interface[i]; 1167 - status = usb_suspend_interface(intf, msg); 1154 + status = usb_suspend_interface(udev, intf, msg); 1168 1155 if (status != 0) 1169 1156 break; 1170 1157 } ··· 1176 1163 if (status != 0) { 1177 1164 while (--i >= 0) { 1178 1165 intf = udev->actconfig->interface[i]; 1179 - usb_resume_interface(intf, 0); 1166 + usb_resume_interface(udev, intf, 0); 1180 1167 } 1181 1168 1182 1169 /* Try another autosuspend when the interfaces aren't busy */ ··· 1289 1276 if (status == 0 && udev->actconfig) { 1290 1277 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { 1291 1278 intf = udev->actconfig->interface[i]; 1292 - usb_resume_interface(intf, udev->reset_resume); 1279 + usb_resume_interface(udev, intf, udev->reset_resume); 1293 1280 } 1294 1281 } 1295 1282 ··· 1618 1605 return status; 1619 1606 } 1620 1607 1621 - static int usb_suspend(struct device *dev, pm_message_t message) 1608 + int usb_suspend(struct device *dev, pm_message_t message) 1622 1609 { 1623 1610 struct usb_device *udev; 1624 1611 1625 - if (!is_usb_device(dev)) /* Ignore PM for interfaces */ 1626 - return 0; 1627 1612 udev = to_usb_device(dev); 1628 1613 1629 1614 /* If udev is already suspended, we can skip this suspend and ··· 1640 1629 return usb_external_suspend_device(udev, message); 1641 1630 } 1642 1631 1643 - static int usb_resume(struct device *dev) 1632 + int usb_resume(struct device *dev) 1644 1633 { 1645 1634 struct usb_device *udev; 1646 1635 1647 - if (!is_usb_device(dev)) /* Ignore PM for interfaces */ 1648 - return 0; 1649 1636 udev = to_usb_device(dev); 1650 1637 1651 1638 /* If udev->skip_sys_resume is set then udev was already suspended ··· 1655 1646 return usb_external_resume_device(udev); 1656 1647 } 1657 1648 1658 - #else 1659 - 1660 - #define usb_suspend NULL 1661 - #define usb_resume NULL 1662 - 1663 1649 #endif /* CONFIG_PM */ 1664 1650 1665 1651 struct bus_type usb_bus_type = { 1666 1652 .name = "usb", 1667 1653 .match = usb_device_match, 1668 1654 .uevent = usb_uevent, 1669 - .suspend = usb_suspend, 1670 - .resume = usb_resume, 1671 1655 };
-9
drivers/usb/core/hcd.c
··· 924 924 return retval; 925 925 } 926 926 927 - void usb_enable_root_hub_irq (struct usb_bus *bus) 928 - { 929 - struct usb_hcd *hcd; 930 - 931 - hcd = container_of (bus, struct usb_hcd, self); 932 - if (hcd->driver->hub_irq_enable && hcd->state != HC_STATE_HALT) 933 - hcd->driver->hub_irq_enable (hcd); 934 - } 935 - 936 927 937 928 /*-------------------------------------------------------------------------*/ 938 929
-4
drivers/usb/core/hcd.h
··· 212 212 int (*bus_suspend)(struct usb_hcd *); 213 213 int (*bus_resume)(struct usb_hcd *); 214 214 int (*start_port_reset)(struct usb_hcd *, unsigned port_num); 215 - void (*hub_irq_enable)(struct usb_hcd *); 216 - /* Needed only if port-change IRQs are level-triggered */ 217 215 218 216 /* force handover of high-speed port to full-speed companion */ 219 217 void (*relinquish_port)(struct usb_hcd *, int); ··· 376 378 extern struct list_head usb_bus_list; 377 379 extern struct mutex usb_bus_list_lock; 378 380 extern wait_queue_head_t usb_kill_urb_queue; 379 - 380 - extern void usb_enable_root_hub_irq(struct usb_bus *bus); 381 381 382 382 extern int usb_find_interface_driver(struct usb_device *dev, 383 383 struct usb_interface *interface);
-9
drivers/usb/core/hub.c
··· 2102 2102 } 2103 2103 2104 2104 clear_bit(port1, hub->busy_bits); 2105 - if (!hub->hdev->parent && !hub->busy_bits[0]) 2106 - usb_enable_root_hub_irq(hub->hdev->bus); 2107 2105 2108 2106 status = check_port_resume_type(udev, 2109 2107 hub, port1, status, portchange, portstatus); ··· 3079 3081 } 3080 3082 } 3081 3083 3082 - /* If this is a root hub, tell the HCD it's okay to 3083 - * re-enable port-change interrupts now. */ 3084 - if (!hdev->parent && !hub->busy_bits[0]) 3085 - usb_enable_root_hub_irq(hdev->bus); 3086 - 3087 3084 loop_autopm: 3088 3085 /* Allow autosuspend if we're not going to run again */ 3089 3086 if (list_empty(&hub->event_list)) ··· 3304 3311 break; 3305 3312 } 3306 3313 clear_bit(port1, parent_hub->busy_bits); 3307 - if (!parent_hdev->parent && !parent_hub->busy_bits[0]) 3308 - usb_enable_root_hub_irq(parent_hdev->bus); 3309 3314 3310 3315 if (ret < 0) 3311 3316 goto re_enumerate;
+7 -2
drivers/usb/core/urb.c
··· 601 601 void usb_unlink_anchored_urbs(struct usb_anchor *anchor) 602 602 { 603 603 struct urb *victim; 604 + unsigned long flags; 604 605 605 - spin_lock_irq(&anchor->lock); 606 + spin_lock_irqsave(&anchor->lock, flags); 606 607 while (!list_empty(&anchor->urb_list)) { 607 608 victim = list_entry(anchor->urb_list.prev, struct urb, 608 609 anchor_list); 610 + usb_get_urb(victim); 611 + spin_unlock_irqrestore(&anchor->lock, flags); 609 612 /* this will unanchor the URB */ 610 613 usb_unlink_urb(victim); 614 + usb_put_urb(victim); 615 + spin_lock_irqsave(&anchor->lock, flags); 611 616 } 612 - spin_unlock_irq(&anchor->lock); 617 + spin_unlock_irqrestore(&anchor->lock, flags); 613 618 } 614 619 EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs); 615 620
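The usb_unlink_anchored_urbs() change above follows the usual pattern for draining a lock-protected list while calling into code that needs the same lock: pin the current URB with usb_get_urb() so a concurrent completion cannot free it, drop the anchor lock (now with the irqsave variant, since callers may have interrupts disabled), unlink, then drop the reference and retake the lock before looking at the list again. An annotated restatement of that loop, with only the function name invented:

	static void unlink_all_anchored(struct usb_anchor *anchor)
	{
		struct urb *victim;
		unsigned long flags;

		spin_lock_irqsave(&anchor->lock, flags);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev, struct urb,
					    anchor_list);
			usb_get_urb(victim);	/* keep it alive across the unlock */
			spin_unlock_irqrestore(&anchor->lock, flags);
			/* unlinking unanchors the URB and may run its completion */
			usb_unlink_urb(victim);
			usb_put_urb(victim);
			spin_lock_irqsave(&anchor->lock, flags);
		}
		spin_unlock_irqrestore(&anchor->lock, flags);
	}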
+67 -6
drivers/usb/core/usb.c
··· 219 219 } 220 220 #endif /* CONFIG_HOTPLUG */ 221 221 222 - struct device_type usb_device_type = { 223 - .name = "usb_device", 224 - .release = usb_release_dev, 225 - .uevent = usb_dev_uevent, 226 - }; 227 - 228 222 #ifdef CONFIG_PM 229 223 230 224 static int ksuspend_usb_init(void) ··· 238 244 destroy_workqueue(ksuspend_usb_wq); 239 245 } 240 246 247 + /* USB device Power-Management thunks. 248 + * There's no need to distinguish here between quiescing a USB device 249 + * and powering it down; the generic_suspend() routine takes care of 250 + * it by skipping the usb_port_suspend() call for a quiesce. And for 251 + * USB interfaces there's no difference at all. 252 + */ 253 + 254 + static int usb_dev_prepare(struct device *dev) 255 + { 256 + return 0; /* Implement eventually? */ 257 + } 258 + 259 + static void usb_dev_complete(struct device *dev) 260 + { 261 + /* Currently used only for rebinding interfaces */ 262 + usb_resume(dev); /* Implement eventually? */ 263 + } 264 + 265 + static int usb_dev_suspend(struct device *dev) 266 + { 267 + return usb_suspend(dev, PMSG_SUSPEND); 268 + } 269 + 270 + static int usb_dev_resume(struct device *dev) 271 + { 272 + return usb_resume(dev); 273 + } 274 + 275 + static int usb_dev_freeze(struct device *dev) 276 + { 277 + return usb_suspend(dev, PMSG_FREEZE); 278 + } 279 + 280 + static int usb_dev_thaw(struct device *dev) 281 + { 282 + return usb_resume(dev); 283 + } 284 + 285 + static int usb_dev_poweroff(struct device *dev) 286 + { 287 + return usb_suspend(dev, PMSG_HIBERNATE); 288 + } 289 + 290 + static int usb_dev_restore(struct device *dev) 291 + { 292 + return usb_resume(dev); 293 + } 294 + 295 + static struct pm_ops usb_device_pm_ops = { 296 + .prepare = usb_dev_prepare, 297 + .complete = usb_dev_complete, 298 + .suspend = usb_dev_suspend, 299 + .resume = usb_dev_resume, 300 + .freeze = usb_dev_freeze, 301 + .thaw = usb_dev_thaw, 302 + .poweroff = usb_dev_poweroff, 303 + .restore = usb_dev_restore, 304 + }; 305 + 241 306 #else 242 307 243 308 #define ksuspend_usb_init() 0 244 309 #define ksuspend_usb_cleanup() do {} while (0) 310 + #define usb_device_pm_ops (*(struct pm_ops *)0) 245 311 246 312 #endif /* CONFIG_PM */ 313 + 314 + struct device_type usb_device_type = { 315 + .name = "usb_device", 316 + .release = usb_release_dev, 317 + .uevent = usb_dev_uevent, 318 + .pm = &usb_device_pm_ops, 319 + }; 247 320 248 321 249 322 /* Returns 1 if @usb_bus is WUSB, 0 otherwise */
+3
drivers/usb/core/usb.h
··· 41 41 42 42 #ifdef CONFIG_PM 43 43 44 + extern int usb_suspend(struct device *dev, pm_message_t msg); 45 + extern int usb_resume(struct device *dev); 46 + 44 47 extern void usb_autosuspend_work(struct work_struct *work); 45 48 extern int usb_port_suspend(struct usb_device *dev); 46 49 extern int usb_port_resume(struct usb_device *dev);
-1
drivers/usb/gadget/amd5536udc.c
··· 44 44 #include <linux/module.h> 45 45 #include <linux/pci.h> 46 46 #include <linux/kernel.h> 47 - #include <linux/version.h> 48 47 #include <linux/delay.h> 49 48 #include <linux/ioport.h> 50 49 #include <linux/sched.h>
+1 -1
drivers/usb/gadget/pxa27x_udc.c
··· 1622 1622 struct pxa_udc *udc = the_controller; 1623 1623 int retval; 1624 1624 1625 - if (!driver || driver->speed != USB_SPEED_FULL || !driver->bind 1625 + if (!driver || driver->speed < USB_SPEED_FULL || !driver->bind 1626 1626 || !driver->disconnect || !driver->setup) 1627 1627 return -EINVAL; 1628 1628 if (!udc)
-1
drivers/usb/gadget/s3c2410_udc.c
··· 35 35 #include <linux/list.h> 36 36 #include <linux/interrupt.h> 37 37 #include <linux/platform_device.h> 38 - #include <linux/version.h> 39 38 #include <linux/clk.h> 40 39 41 40 #include <linux/debugfs.h>
+1 -1
drivers/usb/host/isp1760-hcd.c
··· 988 988 /* 989 989 * write bank1 address twice to ensure the 90ns delay (time 990 990 * between BANK0 write and the priv_read_copy() call is at 991 - * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns) 991 + * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns) 992 992 */ 993 993 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + 994 994 HC_MEMORY_REG);
-1
drivers/usb/host/ohci-at91.c
··· 260 260 */ 261 261 .hub_status_data = ohci_hub_status_data, 262 262 .hub_control = ohci_hub_control, 263 - .hub_irq_enable = ohci_rhsc_enable, 264 263 #ifdef CONFIG_PM 265 264 .bus_suspend = ohci_bus_suspend, 266 265 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-au1xxx.c
··· 163 163 */ 164 164 .hub_status_data = ohci_hub_status_data, 165 165 .hub_control = ohci_hub_control, 166 - .hub_irq_enable = ohci_rhsc_enable, 167 166 #ifdef CONFIG_PM 168 167 .bus_suspend = ohci_bus_suspend, 169 168 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-ep93xx.c
··· 134 134 .get_frame_number = ohci_get_frame, 135 135 .hub_status_data = ohci_hub_status_data, 136 136 .hub_control = ohci_hub_control, 137 - .hub_irq_enable = ohci_rhsc_enable, 138 137 #ifdef CONFIG_PM 139 138 .bus_suspend = ohci_bus_suspend, 140 139 .bus_resume = ohci_bus_resume,
+31 -22
drivers/usb/host/ohci-hub.c
··· 36 36 37 37 /*-------------------------------------------------------------------------*/ 38 38 39 - /* hcd->hub_irq_enable() */ 40 - static void ohci_rhsc_enable (struct usb_hcd *hcd) 41 - { 42 - struct ohci_hcd *ohci = hcd_to_ohci (hcd); 43 - 44 - spin_lock_irq(&ohci->lock); 45 - if (!ohci->autostop) 46 - del_timer(&hcd->rh_timer); /* Prevent next poll */ 47 - ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable); 48 - spin_unlock_irq(&ohci->lock); 49 - } 50 - 51 39 #define OHCI_SCHED_ENABLES \ 52 40 (OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE) 53 41 ··· 362 374 int any_connected) 363 375 { 364 376 int poll_rh = 1; 377 + int rhsc; 365 378 379 + rhsc = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC; 366 380 switch (ohci->hc_control & OHCI_CTRL_HCFS) { 367 381 368 382 case OHCI_USB_OPER: 369 - /* keep on polling until we know a device is connected 370 - * and RHSC is enabled */ 383 + /* If no status changes are pending, enable status-change 384 + * interrupts. 385 + */ 386 + if (!rhsc && !changed) { 387 + rhsc = OHCI_INTR_RHSC; 388 + ohci_writel(ohci, rhsc, &ohci->regs->intrenable); 389 + } 390 + 391 + /* Keep on polling until we know a device is connected 392 + * and RHSC is enabled, or until we autostop. 393 + */ 371 394 if (!ohci->autostop) { 372 395 if (any_connected || 373 396 !device_may_wakeup(&ohci_to_hcd(ohci) 374 397 ->self.root_hub->dev)) { 375 - if (ohci_readl(ohci, &ohci->regs->intrenable) & 376 - OHCI_INTR_RHSC) 398 + if (rhsc) 377 399 poll_rh = 0; 378 400 } else { 379 401 ohci->autostop = 1; ··· 396 398 ohci->autostop = 0; 397 399 ohci->next_statechange = jiffies + 398 400 STATECHANGE_DELAY; 399 - } else if (time_after_eq(jiffies, 401 + } else if (rhsc && time_after_eq(jiffies, 400 402 ohci->next_statechange) 401 403 && !ohci->ed_rm_list 402 404 && !(ohci->hc_control & 403 405 OHCI_SCHED_ENABLES)) { 404 406 ohci_rh_suspend(ohci, 1); 407 + poll_rh = 0; 405 408 } 406 409 } 407 410 break; ··· 416 417 else 417 418 usb_hcd_resume_root_hub(ohci_to_hcd(ohci)); 418 419 } else { 420 + if (!rhsc && (ohci->autostop || 421 + ohci_to_hcd(ohci)->self.root_hub-> 422 + do_remote_wakeup)) 423 + ohci_writel(ohci, OHCI_INTR_RHSC, 424 + &ohci->regs->intrenable); 425 + 419 426 /* everything is idle, no need for polling */ 420 427 poll_rh = 0; 421 428 } ··· 443 438 static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, 444 439 int any_connected) 445 440 { 446 - int poll_rh = 1; 447 - 448 - /* keep on polling until RHSC is enabled */ 441 + /* If RHSC is enabled, don't poll */ 449 442 if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC) 450 - poll_rh = 0; 451 - return poll_rh; 443 + return 0; 444 + 445 + /* If no status changes are pending, enable status-change interrupts */ 446 + if (!changed) { 447 + ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable); 448 + return 0; 449 + } 450 + return 1; 452 451 } 453 452 454 453 #endif /* CONFIG_PM */
-1
drivers/usb/host/ohci-lh7a404.c
··· 193 193 */ 194 194 .hub_status_data = ohci_hub_status_data, 195 195 .hub_control = ohci_hub_control, 196 - .hub_irq_enable = ohci_rhsc_enable, 197 196 #ifdef CONFIG_PM 198 197 .bus_suspend = ohci_bus_suspend, 199 198 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-omap.c
··· 470 470 */ 471 471 .hub_status_data = ohci_hub_status_data, 472 472 .hub_control = ohci_hub_control, 473 - .hub_irq_enable = ohci_rhsc_enable, 474 473 #ifdef CONFIG_PM 475 474 .bus_suspend = ohci_bus_suspend, 476 475 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-pci.c
··· 459 459 */ 460 460 .hub_status_data = ohci_hub_status_data, 461 461 .hub_control = ohci_hub_control, 462 - .hub_irq_enable = ohci_rhsc_enable, 463 462 #ifdef CONFIG_PM 464 463 .bus_suspend = ohci_bus_suspend, 465 464 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-pnx4008.c
··· 277 277 */ 278 278 .hub_status_data = ohci_hub_status_data, 279 279 .hub_control = ohci_hub_control, 280 - .hub_irq_enable = ohci_rhsc_enable, 281 280 #ifdef CONFIG_PM 282 281 .bus_suspend = ohci_bus_suspend, 283 282 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-pnx8550.c
··· 201 201 */ 202 202 .hub_status_data = ohci_hub_status_data, 203 203 .hub_control = ohci_hub_control, 204 - .hub_irq_enable = ohci_rhsc_enable, 205 204 #ifdef CONFIG_PM 206 205 .bus_suspend = ohci_bus_suspend, 207 206 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-ppc-of.c
··· 72 72 */ 73 73 .hub_status_data = ohci_hub_status_data, 74 74 .hub_control = ohci_hub_control, 75 - .hub_irq_enable = ohci_rhsc_enable, 76 75 #ifdef CONFIG_PM 77 76 .bus_suspend = ohci_bus_suspend, 78 77 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-ppc-soc.c
··· 172 172 */ 173 173 .hub_status_data = ohci_hub_status_data, 174 174 .hub_control = ohci_hub_control, 175 - .hub_irq_enable = ohci_rhsc_enable, 176 175 #ifdef CONFIG_PM 177 176 .bus_suspend = ohci_bus_suspend, 178 177 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-ps3.c
··· 68 68 .get_frame_number = ohci_get_frame, 69 69 .hub_status_data = ohci_hub_status_data, 70 70 .hub_control = ohci_hub_control, 71 - .hub_irq_enable = ohci_rhsc_enable, 72 71 .start_port_reset = ohci_start_port_reset, 73 72 #if defined(CONFIG_PM) 74 73 .bus_suspend = ohci_bus_suspend,
-1
drivers/usb/host/ohci-pxa27x.c
··· 298 298 */ 299 299 .hub_status_data = ohci_hub_status_data, 300 300 .hub_control = ohci_hub_control, 301 - .hub_irq_enable = ohci_rhsc_enable, 302 301 #ifdef CONFIG_PM 303 302 .bus_suspend = ohci_bus_suspend, 304 303 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-s3c2410.c
··· 466 466 */ 467 467 .hub_status_data = ohci_s3c2410_hub_status_data, 468 468 .hub_control = ohci_s3c2410_hub_control, 469 - .hub_irq_enable = ohci_rhsc_enable, 470 469 #ifdef CONFIG_PM 471 470 .bus_suspend = ohci_bus_suspend, 472 471 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-sa1111.c
··· 231 231 */ 232 232 .hub_status_data = ohci_hub_status_data, 233 233 .hub_control = ohci_hub_control, 234 - .hub_irq_enable = ohci_rhsc_enable, 235 234 #ifdef CONFIG_PM 236 235 .bus_suspend = ohci_bus_suspend, 237 236 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-sh.c
··· 68 68 */ 69 69 .hub_status_data = ohci_hub_status_data, 70 70 .hub_control = ohci_hub_control, 71 - .hub_irq_enable = ohci_rhsc_enable, 72 71 #ifdef CONFIG_PM 73 72 .bus_suspend = ohci_bus_suspend, 74 73 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-sm501.c
··· 75 75 */ 76 76 .hub_status_data = ohci_hub_status_data, 77 77 .hub_control = ohci_hub_control, 78 - .hub_irq_enable = ohci_rhsc_enable, 79 78 #ifdef CONFIG_PM 80 79 .bus_suspend = ohci_bus_suspend, 81 80 .bus_resume = ohci_bus_resume,
-1
drivers/usb/host/ohci-ssb.c
··· 81 81 82 82 .hub_status_data = ohci_hub_status_data, 83 83 .hub_control = ohci_hub_control, 84 - .hub_irq_enable = ohci_rhsc_enable, 85 84 #ifdef CONFIG_PM 86 85 .bus_suspend = ohci_bus_suspend, 87 86 .bus_resume = ohci_bus_resume,
-11
drivers/usb/host/u132-hcd.c
··· 2934 2934 return 0; 2935 2935 } 2936 2936 2937 - static void u132_hub_irq_enable(struct usb_hcd *hcd) 2938 - { 2939 - struct u132 *u132 = hcd_to_u132(hcd); 2940 - if (u132->going > 1) { 2941 - dev_err(&u132->platform_dev->dev, "device has been removed %d\n" 2942 - , u132->going); 2943 - } else if (u132->going > 0) 2944 - dev_err(&u132->platform_dev->dev, "device is being removed\n"); 2945 - } 2946 - 2947 2937 2948 2938 #ifdef CONFIG_PM 2949 2939 static int u132_bus_suspend(struct usb_hcd *hcd) ··· 2985 2995 .bus_suspend = u132_bus_suspend, 2986 2996 .bus_resume = u132_bus_resume, 2987 2997 .start_port_reset = u132_start_port_reset, 2988 - .hub_irq_enable = u132_hub_irq_enable, 2989 2998 }; 2990 2999 2991 3000 /*
-1
drivers/usb/misc/iowarrior.c
··· 19 19 #include <linux/slab.h> 20 20 #include <linux/sched.h> 21 21 #include <linux/poll.h> 22 - #include <linux/version.h> 23 22 #include <linux/usb/iowarrior.h> 24 23 25 24 /* Version Information */
+1
drivers/usb/misc/sisusbvga/sisusb.c
··· 3270 3270 { USB_DEVICE(0x0711, 0x0900) }, 3271 3271 { USB_DEVICE(0x0711, 0x0901) }, 3272 3272 { USB_DEVICE(0x0711, 0x0902) }, 3273 + { USB_DEVICE(0x0711, 0x0918) }, 3273 3274 { USB_DEVICE(0x182d, 0x021c) }, 3274 3275 { USB_DEVICE(0x182d, 0x0269) }, 3275 3276 { }
+6 -7
drivers/usb/musb/Kconfig
··· 165 165 help 166 166 Enable DMA transfers on TUSB 6010 when OMAP DMA is available. 167 167 168 - config USB_MUSB_LOGLEVEL 168 + config USB_MUSB_DEBUG 169 169 depends on USB_MUSB_HDRC 170 - int 'Logging Level (0 - none / 3 - annoying / ... )' 171 - default 0 170 + bool "Enable debugging messages" 171 + default n 172 172 help 173 - Set the logging level. 0 disables the debugging altogether, 174 - although when USB_DEBUG is set the value is at least 1. 175 - Starting at level 3, per-transfer (urb, usb_request, packet, 176 - or dma transfer) tracing may kick in. 173 + This enables musb debugging. To set the logging level use the debug 174 + module parameter. Starting at level 3, per-transfer (urb, usb_request, 175 + packet, or dma transfer) tracing may kick in.
+2 -19
drivers/usb/musb/Makefile
··· 64 64 65 65 # Debugging 66 66 67 - MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL) 68 - 69 - ifeq ("$(strip $(MUSB_DEBUG))","") 70 - ifdef CONFIG_USB_DEBUG 71 - MUSB_DEBUG:=1 72 - else 73 - MUSB_DEBUG:=0 74 - endif 67 + ifeq ($(CONFIG_USB_MUSB_DEBUG),y) 68 + EXTRA_CFLAGS += -DDEBUG 75 69 endif 76 - 77 - ifneq ($(MUSB_DEBUG),0) 78 - EXTRA_CFLAGS += -DDEBUG 79 - 80 - ifeq ($(CONFIG_PROC_FS),y) 81 - musb_hdrc-objs += musb_procfs.o 82 - endif 83 - 84 - endif 85 - 86 - EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
+15 -23
drivers/usb/musb/musb_core.c
··· 114 114 115 115 116 116 117 - #if MUSB_DEBUG > 0 118 - unsigned debug = MUSB_DEBUG; 119 - module_param(debug, uint, 0); 120 - MODULE_PARM_DESC(debug, "initial debug message level"); 121 - 122 - #define MUSB_VERSION_SUFFIX "/dbg" 123 - #endif 117 + unsigned debug; 118 + module_param(debug, uint, S_IRUGO | S_IWUSR); 119 + MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); 124 120 125 121 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" 126 122 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" 127 123 128 - #define MUSB_VERSION_BASE "6.0" 129 - 130 - #ifndef MUSB_VERSION_SUFFIX 131 - #define MUSB_VERSION_SUFFIX "" 132 - #endif 133 - #define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX 124 + #define MUSB_VERSION "6.0" 134 125 135 126 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION 136 127 ··· 2028 2037 musb->xceiv.state = OTG_STATE_A_IDLE; 2029 2038 2030 2039 status = usb_add_hcd(musb_to_hcd(musb), -1, 0); 2040 + if (status) 2041 + goto fail; 2031 2042 2032 2043 DBG(1, "%s mode, status %d, devctl %02x %c\n", 2033 2044 "HOST", status, ··· 2044 2051 musb->xceiv.state = OTG_STATE_B_IDLE; 2045 2052 2046 2053 status = musb_gadget_setup(musb); 2054 + if (status) 2055 + goto fail; 2047 2056 2048 2057 DBG(1, "%s mode, status %d, dev%02x\n", 2049 2058 is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", ··· 2054 2059 2055 2060 } 2056 2061 2057 - if (status == 0) 2058 - musb_debug_create("driver/musb_hdrc", musb); 2059 - else { 2062 + return 0; 2063 + 2060 2064 fail: 2061 - if (musb->clock) 2062 - clk_put(musb->clock); 2063 - device_init_wakeup(dev, 0); 2064 - musb_free(musb); 2065 - return status; 2066 - } 2065 + if (musb->clock) 2066 + clk_put(musb->clock); 2067 + device_init_wakeup(dev, 0); 2068 + musb_free(musb); 2069 + return status; 2067 2070 2068 2071 #ifdef CONFIG_SYSFS 2069 2072 status = device_create_file(dev, &dev_attr_mode); ··· 2124 2131 * - OTG mode: both roles are deactivated (or never-activated) 2125 2132 */ 2126 2133 musb_shutdown(pdev); 2127 - musb_debug_delete("driver/musb_hdrc", musb); 2128 2134 #ifdef CONFIG_USB_MUSB_HDRC_HCD 2129 2135 if (musb->board_mode == MUSB_HOST) 2130 2136 usb_remove_hcd(musb_to_hcd(musb));
-19
drivers/usb/musb/musb_core.h
··· 485 485 extern int __init musb_platform_init(struct musb *musb); 486 486 extern int musb_platform_exit(struct musb *musb); 487 487 488 - /*-------------------------- ProcFS definitions ---------------------*/ 489 - 490 - struct proc_dir_entry; 491 - 492 - #if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS) 493 - extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data); 494 - extern void musb_debug_delete(char *name, struct musb *data); 495 - 496 - #else 497 - static inline struct proc_dir_entry * 498 - musb_debug_create(char *name, struct musb *data) 499 - { 500 - return NULL; 501 - } 502 - static inline void musb_debug_delete(char *name, struct musb *data) 503 - { 504 - } 505 - #endif 506 - 507 488 #endif /* __MUSB_CORE_H__ */
-4
drivers/usb/musb/musb_debug.h
··· 48 48 __func__, __LINE__ , ## args); \ 49 49 } } while (0) 50 50 51 - #if MUSB_DEBUG > 0 52 51 extern unsigned debug; 53 - #else 54 - #define debug 0 55 - #endif 56 52 57 53 static inline int _dbg_level(unsigned l) 58 54 {
+2
drivers/usb/musb/musb_gadget_ep0.c
··· 476 476 return; 477 477 musb->ackpend = 0; 478 478 } 479 + musb_ep_select(musb->mregs, 0); 479 480 musb_writew(regs, MUSB_CSR0, tmp); 480 481 } 481 482 ··· 529 528 } 530 529 531 530 /* send it out, triggering a "txpktrdy cleared" irq */ 531 + musb_ep_select(musb->mregs, 0); 532 532 musb_writew(regs, MUSB_CSR0, csr); 533 533 } 534 534
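Both musb_gadget_ep0.c hunks above reselect endpoint 0 immediately before writing CSR0 because the MUSB core multiplexes its per-endpoint registers through a single INDEX register; anything that ran in between may have re-pointed that window at another endpoint. A short sketch of the idiom, with the wrapper name invented:

	/* Re-point the indexed register window at EP0 right before the CSR
	 * write, so a stale INDEX cannot route it to another endpoint. */
	static void ep0_write_csr(struct musb *musb, void __iomem *regs, u16 csr)
	{
		musb_ep_select(musb->mregs, 0);
		musb_writew(regs, MUSB_CSR0, csr);
	}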
-830
drivers/usb/musb/musb_procfs.c
··· 1 - /* 2 - * MUSB OTG driver debug support 3 - * 4 - * Copyright 2005 Mentor Graphics Corporation 5 - * Copyright (C) 2005-2006 by Texas Instruments 6 - * Copyright (C) 2006-2007 Nokia Corporation 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License 10 - * version 2 as published by the Free Software Foundation. 11 - * 12 - * This program is distributed in the hope that it will be useful, but 13 - * WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 - * General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 20 - * 02110-1301 USA 21 - * 22 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 23 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 25 - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 29 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 - * 33 - */ 34 - 35 - #include <linux/kernel.h> 36 - #include <linux/proc_fs.h> 37 - #include <linux/seq_file.h> 38 - #include <linux/uaccess.h> /* FIXME remove procfs writes */ 39 - #include <asm/arch/hardware.h> 40 - 41 - #include "musb_core.h" 42 - 43 - #include "davinci.h" 44 - 45 - #ifdef CONFIG_USB_MUSB_HDRC_HCD 46 - 47 - static int dump_qh(struct musb_qh *qh, char *buf, unsigned max) 48 - { 49 - int count; 50 - int tmp; 51 - struct usb_host_endpoint *hep = qh->hep; 52 - struct urb *urb; 53 - 54 - count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n", 55 - qh, qh->dev->devnum, qh->epnum, 56 - ({ char *s; switch (qh->type) { 57 - case USB_ENDPOINT_XFER_BULK: 58 - s = "-bulk"; break; 59 - case USB_ENDPOINT_XFER_INT: 60 - s = "-int"; break; 61 - case USB_ENDPOINT_XFER_CONTROL: 62 - s = ""; break; 63 - default: 64 - s = "iso"; break; 65 - }; s; }), 66 - qh->maxpacket); 67 - if (count <= 0) 68 - return 0; 69 - buf += count; 70 - max -= count; 71 - 72 - list_for_each_entry(urb, &hep->urb_list, urb_list) { 73 - tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n", 74 - usb_pipein(urb->pipe) ? 
"in" : "out", 75 - urb, urb->actual_length, 76 - urb->transfer_buffer_length); 77 - if (tmp <= 0) 78 - break; 79 - tmp = min(tmp, (int)max); 80 - count += tmp; 81 - buf += tmp; 82 - max -= tmp; 83 - } 84 - return count; 85 - } 86 - 87 - static int 88 - dump_queue(struct list_head *q, char *buf, unsigned max) 89 - { 90 - int count = 0; 91 - struct musb_qh *qh; 92 - 93 - list_for_each_entry(qh, q, ring) { 94 - int tmp; 95 - 96 - tmp = dump_qh(qh, buf, max); 97 - if (tmp <= 0) 98 - break; 99 - tmp = min(tmp, (int)max); 100 - count += tmp; 101 - buf += tmp; 102 - max -= tmp; 103 - } 104 - return count; 105 - } 106 - 107 - #endif /* HCD */ 108 - 109 - #ifdef CONFIG_USB_GADGET_MUSB_HDRC 110 - static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max) 111 - { 112 - char *buf = buffer; 113 - int code = 0; 114 - void __iomem *regs = ep->hw_ep->regs; 115 - char *mode = "1buf"; 116 - 117 - if (ep->is_in) { 118 - if (ep->hw_ep->tx_double_buffered) 119 - mode = "2buf"; 120 - } else { 121 - if (ep->hw_ep->rx_double_buffered) 122 - mode = "2buf"; 123 - } 124 - 125 - do { 126 - struct usb_request *req; 127 - 128 - code = snprintf(buf, max, 129 - "\n%s (hw%d): %s%s, csr %04x maxp %04x\n", 130 - ep->name, ep->current_epnum, 131 - mode, ep->dma ? " dma" : "", 132 - musb_readw(regs, 133 - (ep->is_in || !ep->current_epnum) 134 - ? MUSB_TXCSR 135 - : MUSB_RXCSR), 136 - musb_readw(regs, ep->is_in 137 - ? MUSB_TXMAXP 138 - : MUSB_RXMAXP) 139 - ); 140 - if (code <= 0) 141 - break; 142 - code = min(code, (int) max); 143 - buf += code; 144 - max -= code; 145 - 146 - if (is_cppi_enabled() && ep->current_epnum) { 147 - unsigned cppi = ep->current_epnum - 1; 148 - void __iomem *base = ep->musb->ctrl_base; 149 - unsigned off1 = cppi << 2; 150 - void __iomem *ram = base; 151 - char tmp[16]; 152 - 153 - if (ep->is_in) { 154 - ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi); 155 - tmp[0] = 0; 156 - } else { 157 - ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi); 158 - snprintf(tmp, sizeof tmp, "%d left, ", 159 - musb_readl(base, 160 - DAVINCI_RXCPPI_BUFCNT0_REG + off1)); 161 - } 162 - 163 - code = snprintf(buf, max, "%cX DMA%d: %s" 164 - "%08x %08x, %08x %08x; " 165 - "%08x %08x %08x .. %08x\n", 166 - ep->is_in ? 'T' : 'R', 167 - ep->current_epnum - 1, tmp, 168 - musb_readl(ram, 0 * 4), 169 - musb_readl(ram, 1 * 4), 170 - musb_readl(ram, 2 * 4), 171 - musb_readl(ram, 3 * 4), 172 - musb_readl(ram, 4 * 4), 173 - musb_readl(ram, 5 * 4), 174 - musb_readl(ram, 6 * 4), 175 - musb_readl(ram, 7 * 4)); 176 - if (code <= 0) 177 - break; 178 - code = min(code, (int) max); 179 - buf += code; 180 - max -= code; 181 - } 182 - 183 - if (list_empty(&ep->req_list)) { 184 - code = snprintf(buf, max, "\t(queue empty)\n"); 185 - if (code <= 0) 186 - break; 187 - code = min(code, (int) max); 188 - buf += code; 189 - max -= code; 190 - break; 191 - } 192 - list_for_each_entry(req, &ep->req_list, list) { 193 - code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n", 194 - req, 195 - req->zero ? "zero, " : "", 196 - req->short_not_ok ? 
"!short, " : "", 197 - req->actual, req->length); 198 - if (code <= 0) 199 - break; 200 - code = min(code, (int) max); 201 - buf += code; 202 - max -= code; 203 - } 204 - } while (0); 205 - return buf - buffer; 206 - } 207 - #endif 208 - 209 - static int 210 - dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max) 211 - { 212 - int code = 0; 213 - char *buf = aBuffer; 214 - struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 215 - 216 - do { 217 - musb_ep_select(musb->mregs, epnum); 218 - #ifdef CONFIG_USB_MUSB_HDRC_HCD 219 - if (is_host_active(musb)) { 220 - int dump_rx, dump_tx; 221 - void __iomem *regs = hw_ep->regs; 222 - 223 - /* TEMPORARY (!) until we have a real periodic 224 - * schedule tree ... 225 - */ 226 - if (!epnum) { 227 - /* control is shared, uses RX queue 228 - * but (mostly) shadowed tx registers 229 - */ 230 - dump_tx = !list_empty(&musb->control); 231 - dump_rx = 0; 232 - } else if (hw_ep == musb->bulk_ep) { 233 - dump_tx = !list_empty(&musb->out_bulk); 234 - dump_rx = !list_empty(&musb->in_bulk); 235 - } else if (musb->periodic[epnum]) { 236 - struct usb_host_endpoint *hep; 237 - 238 - hep = musb->periodic[epnum]->hep; 239 - dump_rx = hep->desc.bEndpointAddress 240 - & USB_ENDPOINT_DIR_MASK; 241 - dump_tx = !dump_rx; 242 - } else 243 - break; 244 - /* END TEMPORARY */ 245 - 246 - 247 - if (dump_rx) { 248 - code = snprintf(buf, max, 249 - "\nRX%d: %s rxcsr %04x interval %02x " 250 - "max %04x type %02x; " 251 - "dev %d hub %d port %d" 252 - "\n", 253 - epnum, 254 - hw_ep->rx_double_buffered 255 - ? "2buf" : "1buf", 256 - musb_readw(regs, MUSB_RXCSR), 257 - musb_readb(regs, MUSB_RXINTERVAL), 258 - musb_readw(regs, MUSB_RXMAXP), 259 - musb_readb(regs, MUSB_RXTYPE), 260 - /* FIXME: assumes multipoint */ 261 - musb_readb(musb->mregs, 262 - MUSB_BUSCTL_OFFSET(epnum, 263 - MUSB_RXFUNCADDR)), 264 - musb_readb(musb->mregs, 265 - MUSB_BUSCTL_OFFSET(epnum, 266 - MUSB_RXHUBADDR)), 267 - musb_readb(musb->mregs, 268 - MUSB_BUSCTL_OFFSET(epnum, 269 - MUSB_RXHUBPORT)) 270 - ); 271 - if (code <= 0) 272 - break; 273 - code = min(code, (int) max); 274 - buf += code; 275 - max -= code; 276 - 277 - if (is_cppi_enabled() 278 - && epnum 279 - && hw_ep->rx_channel) { 280 - unsigned cppi = epnum - 1; 281 - unsigned off1 = cppi << 2; 282 - void __iomem *base; 283 - void __iomem *ram; 284 - char tmp[16]; 285 - 286 - base = musb->ctrl_base; 287 - ram = DAVINCI_RXCPPI_STATERAM_OFFSET( 288 - cppi) + base; 289 - snprintf(tmp, sizeof tmp, "%d left, ", 290 - musb_readl(base, 291 - DAVINCI_RXCPPI_BUFCNT0_REG 292 - + off1)); 293 - 294 - code = snprintf(buf, max, 295 - " rx dma%d: %s" 296 - "%08x %08x, %08x %08x; " 297 - "%08x %08x %08x .. 
%08x\n", 298 - cppi, tmp, 299 - musb_readl(ram, 0 * 4), 300 - musb_readl(ram, 1 * 4), 301 - musb_readl(ram, 2 * 4), 302 - musb_readl(ram, 3 * 4), 303 - musb_readl(ram, 4 * 4), 304 - musb_readl(ram, 5 * 4), 305 - musb_readl(ram, 6 * 4), 306 - musb_readl(ram, 7 * 4)); 307 - if (code <= 0) 308 - break; 309 - code = min(code, (int) max); 310 - buf += code; 311 - max -= code; 312 - } 313 - 314 - if (hw_ep == musb->bulk_ep 315 - && !list_empty( 316 - &musb->in_bulk)) { 317 - code = dump_queue(&musb->in_bulk, 318 - buf, max); 319 - if (code <= 0) 320 - break; 321 - code = min(code, (int) max); 322 - buf += code; 323 - max -= code; 324 - } else if (musb->periodic[epnum]) { 325 - code = dump_qh(musb->periodic[epnum], 326 - buf, max); 327 - if (code <= 0) 328 - break; 329 - code = min(code, (int) max); 330 - buf += code; 331 - max -= code; 332 - } 333 - } 334 - 335 - if (dump_tx) { 336 - code = snprintf(buf, max, 337 - "\nTX%d: %s txcsr %04x interval %02x " 338 - "max %04x type %02x; " 339 - "dev %d hub %d port %d" 340 - "\n", 341 - epnum, 342 - hw_ep->tx_double_buffered 343 - ? "2buf" : "1buf", 344 - musb_readw(regs, MUSB_TXCSR), 345 - musb_readb(regs, MUSB_TXINTERVAL), 346 - musb_readw(regs, MUSB_TXMAXP), 347 - musb_readb(regs, MUSB_TXTYPE), 348 - /* FIXME: assumes multipoint */ 349 - musb_readb(musb->mregs, 350 - MUSB_BUSCTL_OFFSET(epnum, 351 - MUSB_TXFUNCADDR)), 352 - musb_readb(musb->mregs, 353 - MUSB_BUSCTL_OFFSET(epnum, 354 - MUSB_TXHUBADDR)), 355 - musb_readb(musb->mregs, 356 - MUSB_BUSCTL_OFFSET(epnum, 357 - MUSB_TXHUBPORT)) 358 - ); 359 - if (code <= 0) 360 - break; 361 - code = min(code, (int) max); 362 - buf += code; 363 - max -= code; 364 - 365 - if (is_cppi_enabled() 366 - && epnum 367 - && hw_ep->tx_channel) { 368 - unsigned cppi = epnum - 1; 369 - void __iomem *base; 370 - void __iomem *ram; 371 - 372 - base = musb->ctrl_base; 373 - ram = DAVINCI_RXCPPI_STATERAM_OFFSET( 374 - cppi) + base; 375 - code = snprintf(buf, max, 376 - " tx dma%d: " 377 - "%08x %08x, %08x %08x; " 378 - "%08x %08x %08x .. 
%08x\n", 379 - cppi, 380 - musb_readl(ram, 0 * 4), 381 - musb_readl(ram, 1 * 4), 382 - musb_readl(ram, 2 * 4), 383 - musb_readl(ram, 3 * 4), 384 - musb_readl(ram, 4 * 4), 385 - musb_readl(ram, 5 * 4), 386 - musb_readl(ram, 6 * 4), 387 - musb_readl(ram, 7 * 4)); 388 - if (code <= 0) 389 - break; 390 - code = min(code, (int) max); 391 - buf += code; 392 - max -= code; 393 - } 394 - 395 - if (hw_ep == musb->control_ep 396 - && !list_empty( 397 - &musb->control)) { 398 - code = dump_queue(&musb->control, 399 - buf, max); 400 - if (code <= 0) 401 - break; 402 - code = min(code, (int) max); 403 - buf += code; 404 - max -= code; 405 - } else if (hw_ep == musb->bulk_ep 406 - && !list_empty( 407 - &musb->out_bulk)) { 408 - code = dump_queue(&musb->out_bulk, 409 - buf, max); 410 - if (code <= 0) 411 - break; 412 - code = min(code, (int) max); 413 - buf += code; 414 - max -= code; 415 - } else if (musb->periodic[epnum]) { 416 - code = dump_qh(musb->periodic[epnum], 417 - buf, max); 418 - if (code <= 0) 419 - break; 420 - code = min(code, (int) max); 421 - buf += code; 422 - max -= code; 423 - } 424 - } 425 - } 426 - #endif 427 - #ifdef CONFIG_USB_GADGET_MUSB_HDRC 428 - if (is_peripheral_active(musb)) { 429 - code = 0; 430 - 431 - if (hw_ep->ep_in.desc || !epnum) { 432 - code = dump_ep(&hw_ep->ep_in, buf, max); 433 - if (code <= 0) 434 - break; 435 - code = min(code, (int) max); 436 - buf += code; 437 - max -= code; 438 - } 439 - if (hw_ep->ep_out.desc) { 440 - code = dump_ep(&hw_ep->ep_out, buf, max); 441 - if (code <= 0) 442 - break; 443 - code = min(code, (int) max); 444 - buf += code; 445 - max -= code; 446 - } 447 - } 448 - #endif 449 - } while (0); 450 - 451 - return buf - aBuffer; 452 - } 453 - 454 - /* Dump the current status and compile options. 455 - * @param musb the device driver instance 456 - * @param buffer where to dump the status; it must be big enough to hold the 457 - * result otherwise "BAD THINGS HAPPENS(TM)". 458 - */ 459 - static int dump_header_stats(struct musb *musb, char *buffer) 460 - { 461 - int code, count = 0; 462 - const void __iomem *mbase = musb->mregs; 463 - 464 - *buffer = 0; 465 - count = sprintf(buffer, "Status: %sHDRC, Mode=%s " 466 - "(Power=%02x, DevCtl=%02x)\n", 467 - (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb), 468 - musb_readb(mbase, MUSB_POWER), 469 - musb_readb(mbase, MUSB_DEVCTL)); 470 - if (count <= 0) 471 - return 0; 472 - buffer += count; 473 - 474 - code = sprintf(buffer, "OTG state: %s; %sactive\n", 475 - otg_state_string(musb), 476 - musb->is_active ? "" : "in"); 477 - if (code <= 0) 478 - goto done; 479 - buffer += code; 480 - count += code; 481 - 482 - code = sprintf(buffer, 483 - "Options: " 484 - #ifdef CONFIG_MUSB_PIO_ONLY 485 - "pio" 486 - #elif defined(CONFIG_USB_TI_CPPI_DMA) 487 - "cppi-dma" 488 - #elif defined(CONFIG_USB_INVENTRA_DMA) 489 - "musb-dma" 490 - #elif defined(CONFIG_USB_TUSB_OMAP_DMA) 491 - "tusb-omap-dma" 492 - #else 493 - "?dma?" 
494 - #endif 495 - ", " 496 - #ifdef CONFIG_USB_MUSB_OTG 497 - "otg (peripheral+host)" 498 - #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) 499 - "peripheral" 500 - #elif defined(CONFIG_USB_MUSB_HDRC_HCD) 501 - "host" 502 - #endif 503 - ", debug=%d [eps=%d]\n", 504 - debug, 505 - musb->nr_endpoints); 506 - if (code <= 0) 507 - goto done; 508 - count += code; 509 - buffer += code; 510 - 511 - #ifdef CONFIG_USB_GADGET_MUSB_HDRC 512 - code = sprintf(buffer, "Peripheral address: %02x\n", 513 - musb_readb(musb->ctrl_base, MUSB_FADDR)); 514 - if (code <= 0) 515 - goto done; 516 - buffer += code; 517 - count += code; 518 - #endif 519 - 520 - #ifdef CONFIG_USB_MUSB_HDRC_HCD 521 - code = sprintf(buffer, "Root port status: %08x\n", 522 - musb->port1_status); 523 - if (code <= 0) 524 - goto done; 525 - buffer += code; 526 - count += code; 527 - #endif 528 - 529 - #ifdef CONFIG_ARCH_DAVINCI 530 - code = sprintf(buffer, 531 - "DaVinci: ctrl=%02x stat=%1x phy=%03x\n" 532 - "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x" 533 - "\n", 534 - musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG), 535 - musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG), 536 - __raw_readl((void __force __iomem *) 537 - IO_ADDRESS(USBPHY_CTL_PADDR)), 538 - musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG), 539 - musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG), 540 - musb_readl(musb->ctrl_base, 541 - DAVINCI_USB_INT_SOURCE_REG), 542 - musb_readl(musb->ctrl_base, 543 - DAVINCI_USB_INT_MASK_REG)); 544 - if (code <= 0) 545 - goto done; 546 - count += code; 547 - buffer += code; 548 - #endif /* DAVINCI */ 549 - 550 - #ifdef CONFIG_USB_TUSB6010 551 - code = sprintf(buffer, 552 - "TUSB6010: devconf %08x, phy enable %08x drive %08x" 553 - "\n\totg %03x timer %08x" 554 - "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x" 555 - "\n", 556 - musb_readl(musb->ctrl_base, TUSB_DEV_CONF), 557 - musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE), 558 - musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL), 559 - musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT), 560 - musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER), 561 - musb_readl(musb->ctrl_base, TUSB_PRCM_CONF), 562 - musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT), 563 - musb_readl(musb->ctrl_base, TUSB_INT_SRC), 564 - musb_readl(musb->ctrl_base, TUSB_INT_MASK)); 565 - if (code <= 0) 566 - goto done; 567 - count += code; 568 - buffer += code; 569 - #endif /* DAVINCI */ 570 - 571 - if (is_cppi_enabled() && musb->dma_controller) { 572 - code = sprintf(buffer, 573 - "CPPI: txcr=%d txsrc=%01x txena=%01x; " 574 - "rxcr=%d rxsrc=%01x rxena=%01x " 575 - "\n", 576 - musb_readl(musb->ctrl_base, 577 - DAVINCI_TXCPPI_CTRL_REG), 578 - musb_readl(musb->ctrl_base, 579 - DAVINCI_TXCPPI_RAW_REG), 580 - musb_readl(musb->ctrl_base, 581 - DAVINCI_TXCPPI_INTENAB_REG), 582 - musb_readl(musb->ctrl_base, 583 - DAVINCI_RXCPPI_CTRL_REG), 584 - musb_readl(musb->ctrl_base, 585 - DAVINCI_RXCPPI_RAW_REG), 586 - musb_readl(musb->ctrl_base, 587 - DAVINCI_RXCPPI_INTENAB_REG)); 588 - if (code <= 0) 589 - goto done; 590 - count += code; 591 - buffer += code; 592 - } 593 - 594 - #ifdef CONFIG_USB_GADGET_MUSB_HDRC 595 - if (is_peripheral_enabled(musb)) { 596 - code = sprintf(buffer, "Gadget driver: %s\n", 597 - musb->gadget_driver 598 - ? 
musb->gadget_driver->driver.name 599 - : "(none)"); 600 - if (code <= 0) 601 - goto done; 602 - count += code; 603 - buffer += code; 604 - } 605 - #endif 606 - 607 - done: 608 - return count; 609 - } 610 - 611 - /* Write to ProcFS 612 - * 613 - * C soft-connect 614 - * c soft-disconnect 615 - * I enable HS 616 - * i disable HS 617 - * s stop session 618 - * F force session (OTG-unfriendly) 619 - * E rElinquish bus (OTG) 620 - * H request host mode 621 - * h cancel host request 622 - * T start sending TEST_PACKET 623 - * D<num> set/query the debug level 624 - */ 625 - static int musb_proc_write(struct file *file, const char __user *buffer, 626 - unsigned long count, void *data) 627 - { 628 - char cmd; 629 - u8 reg; 630 - struct musb *musb = (struct musb *)data; 631 - void __iomem *mbase = musb->mregs; 632 - 633 - /* MOD_INC_USE_COUNT; */ 634 - 635 - if (unlikely(copy_from_user(&cmd, buffer, 1))) 636 - return -EFAULT; 637 - 638 - switch (cmd) { 639 - case 'C': 640 - if (mbase) { 641 - reg = musb_readb(mbase, MUSB_POWER) 642 - | MUSB_POWER_SOFTCONN; 643 - musb_writeb(mbase, MUSB_POWER, reg); 644 - } 645 - break; 646 - 647 - case 'c': 648 - if (mbase) { 649 - reg = musb_readb(mbase, MUSB_POWER) 650 - & ~MUSB_POWER_SOFTCONN; 651 - musb_writeb(mbase, MUSB_POWER, reg); 652 - } 653 - break; 654 - 655 - case 'I': 656 - if (mbase) { 657 - reg = musb_readb(mbase, MUSB_POWER) 658 - | MUSB_POWER_HSENAB; 659 - musb_writeb(mbase, MUSB_POWER, reg); 660 - } 661 - break; 662 - 663 - case 'i': 664 - if (mbase) { 665 - reg = musb_readb(mbase, MUSB_POWER) 666 - & ~MUSB_POWER_HSENAB; 667 - musb_writeb(mbase, MUSB_POWER, reg); 668 - } 669 - break; 670 - 671 - case 'F': 672 - reg = musb_readb(mbase, MUSB_DEVCTL); 673 - reg |= MUSB_DEVCTL_SESSION; 674 - musb_writeb(mbase, MUSB_DEVCTL, reg); 675 - break; 676 - 677 - case 'H': 678 - if (mbase) { 679 - reg = musb_readb(mbase, MUSB_DEVCTL); 680 - reg |= MUSB_DEVCTL_HR; 681 - musb_writeb(mbase, MUSB_DEVCTL, reg); 682 - /* MUSB_HST_MODE( ((struct musb*)data) ); */ 683 - /* WARNING("Host Mode\n"); */ 684 - } 685 - break; 686 - 687 - case 'h': 688 - if (mbase) { 689 - reg = musb_readb(mbase, MUSB_DEVCTL); 690 - reg &= ~MUSB_DEVCTL_HR; 691 - musb_writeb(mbase, MUSB_DEVCTL, reg); 692 - } 693 - break; 694 - 695 - case 'T': 696 - if (mbase) { 697 - musb_load_testpacket(musb); 698 - musb_writeb(mbase, MUSB_TESTMODE, 699 - MUSB_TEST_PACKET); 700 - } 701 - break; 702 - 703 - #if (MUSB_DEBUG > 0) 704 - /* set/read debug level */ 705 - case 'D':{ 706 - if (count > 1) { 707 - char digits[8], *p = digits; 708 - int i = 0, level = 0, sign = 1; 709 - int len = min(count - 1, (unsigned long)8); 710 - 711 - if (copy_from_user(&digits, &buffer[1], len)) 712 - return -EFAULT; 713 - 714 - /* optional sign */ 715 - if (*p == '-') { 716 - len -= 1; 717 - sign = -sign; 718 - p++; 719 - } 720 - 721 - /* read it */ 722 - while (i++ < len && *p > '0' && *p < '9') { 723 - level = level * 10 + (*p - '0'); 724 - p++; 725 - } 726 - 727 - level *= sign; 728 - DBG(1, "debug level %d\n", level); 729 - debug = level; 730 - } 731 - } 732 - break; 733 - 734 - 735 - case '?': 736 - INFO("?: you are seeing it\n"); 737 - INFO("C/c: soft connect enable/disable\n"); 738 - INFO("I/i: hispeed enable/disable\n"); 739 - INFO("F: force session start\n"); 740 - INFO("H: host mode\n"); 741 - INFO("T: start sending TEST_PACKET\n"); 742 - INFO("D: set/read dbug level\n"); 743 - break; 744 - #endif 745 - 746 - default: 747 - ERR("Command %c not implemented\n", cmd); 748 - break; 749 - } 750 - 751 - 
musb_platform_try_idle(musb, 0); 752 - 753 - return count; 754 - } 755 - 756 - static int musb_proc_read(char *page, char **start, 757 - off_t off, int count, int *eof, void *data) 758 - { 759 - char *buffer = page; 760 - int code = 0; 761 - unsigned long flags; 762 - struct musb *musb = data; 763 - unsigned epnum; 764 - 765 - count -= off; 766 - count -= 1; /* for NUL at end */ 767 - if (count <= 0) 768 - return -EINVAL; 769 - 770 - spin_lock_irqsave(&musb->lock, flags); 771 - 772 - code = dump_header_stats(musb, buffer); 773 - if (code > 0) { 774 - buffer += code; 775 - count -= code; 776 - } 777 - 778 - /* generate the report for the end points */ 779 - /* REVISIT ... not unless something's connected! */ 780 - for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints; 781 - epnum++) { 782 - code = dump_end_info(musb, epnum, buffer, count); 783 - if (code > 0) { 784 - buffer += code; 785 - count -= code; 786 - } 787 - } 788 - 789 - musb_platform_try_idle(musb, 0); 790 - 791 - spin_unlock_irqrestore(&musb->lock, flags); 792 - *eof = 1; 793 - 794 - return buffer - page; 795 - } 796 - 797 - void __devexit musb_debug_delete(char *name, struct musb *musb) 798 - { 799 - if (musb->proc_entry) 800 - remove_proc_entry(name, NULL); 801 - } 802 - 803 - struct proc_dir_entry *__init 804 - musb_debug_create(char *name, struct musb *data) 805 - { 806 - struct proc_dir_entry *pde; 807 - 808 - /* FIXME convert everything to seq_file; then later, debugfs */ 809 - 810 - if (!name) 811 - return NULL; 812 - 813 - pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL); 814 - data->proc_entry = pde; 815 - if (pde) { 816 - pde->data = data; 817 - /* pde->owner = THIS_MODULE; */ 818 - 819 - pde->read_proc = musb_proc_read; 820 - pde->write_proc = musb_proc_write; 821 - 822 - pde->size = 0; 823 - 824 - pr_debug("Registered /proc/%s\n", name); 825 - } else { 826 - pr_debug("Cannot create a valid proc file entry"); 827 - } 828 - 829 - return pde; 830 - }
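The procfs interface deleted above carried its own FIXME ("convert everything to seq_file; then later, debugfs"). As a rough sketch only, not the driver's actual replacement, a read-only status node in that style could look like the following; musb_status_show() and the fields it prints are illustrative, while the seq_file/debugfs calls are the stock kernel helpers and the register accessors come from the musb_core.h the removed file already included.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical sketch; assumes the musb_core.h declarations used by the
 * removed file are in scope. */
static int musb_status_show(struct seq_file *s, void *unused)
{
	struct musb *musb = s->private;

	seq_printf(s, "Power=%02x DevCtl=%02x\n",
		   musb_readb(musb->mregs, MUSB_POWER),
		   musb_readb(musb->mregs, MUSB_DEVCTL));
	return 0;
}

static int musb_status_open(struct inode *inode, struct file *file)
{
	/* debugfs hands the data pointer back via i_private */
	return single_open(file, musb_status_show, inode->i_private);
}

static const struct file_operations musb_status_fops = {
	.open		= musb_status_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* usage: debugfs_create_file("musb-status", S_IRUGO, NULL, musb,
 *        &musb_status_fops); -- seq_printf() does the length bookkeeping
 *        that the removed read_proc code tracked by hand. */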
-2
drivers/usb/serial/garmin_gps.c
··· 38 38 #include <linux/usb.h> 39 39 #include <linux/usb/serial.h> 40 40 41 - #include <linux/version.h> 42 - 43 41 /* the mode to be set when the port ist opened */ 44 42 static int initial_mode = 1; 45 43
+2
drivers/usb/serial/option.c
··· 173 173 #define KYOCERA_PRODUCT_KPC680 0x180a 174 174 175 175 #define ANYDATA_VENDOR_ID 0x16d5 176 + #define ANYDATA_PRODUCT_ADU_620UW 0x6202 176 177 #define ANYDATA_PRODUCT_ADU_E100A 0x6501 177 178 #define ANYDATA_PRODUCT_ADU_500A 0x6502 178 179 ··· 319 318 { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ 320 319 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, 321 320 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, 321 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, 322 322 { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, 323 323 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, 324 324 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
-1
drivers/video/arkfb.c
··· 11 11 * Code is based on s3fb 12 12 */ 13 13 14 - #include <linux/version.h> 15 14 #include <linux/module.h> 16 15 #include <linux/kernel.h> 17 16 #include <linux/errno.h>
+12 -3
drivers/video/bf54x-lq043fb.c
··· 733 733 static int bfin_bf54x_suspend(struct platform_device *pdev, pm_message_t state) 734 734 { 735 735 struct fb_info *fbinfo = platform_get_drvdata(pdev); 736 - struct bfin_bf54xfb_info *info = fbinfo->par; 737 736 738 737 bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() & ~EPPI_EN); 739 738 disable_dma(CH_EPPI0); ··· 746 747 struct fb_info *fbinfo = platform_get_drvdata(pdev); 747 748 struct bfin_bf54xfb_info *info = fbinfo->par; 748 749 749 - enable_dma(CH_EPPI0); 750 - bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN); 750 + if (info->lq043_open_cnt) { 751 + 752 + bfin_write_EPPI0_CONTROL(0); 753 + SSYNC(); 754 + 755 + config_dma(info); 756 + config_ppi(info); 757 + 758 + /* start dma */ 759 + enable_dma(CH_EPPI0); 760 + bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN); 761 + } 751 762 752 763 return 0; 753 764 }
+19
drivers/video/fb_defio.c
··· 114 114 .page_mkwrite = fb_deferred_io_mkwrite, 115 115 }; 116 116 117 + static int fb_deferred_io_set_page_dirty(struct page *page) 118 + { 119 + if (!PageDirty(page)) 120 + SetPageDirty(page); 121 + return 0; 122 + } 123 + 124 + static const struct address_space_operations fb_deferred_io_aops = { 125 + .set_page_dirty = fb_deferred_io_set_page_dirty, 126 + }; 127 + 117 128 static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) 118 129 { 119 130 vma->vm_ops = &fb_deferred_io_vm_ops; ··· 173 162 fbdefio->delay = HZ; 174 163 } 175 164 EXPORT_SYMBOL_GPL(fb_deferred_io_init); 165 + 166 + void fb_deferred_io_open(struct fb_info *info, 167 + struct inode *inode, 168 + struct file *file) 169 + { 170 + file->f_mapping->a_ops = &fb_deferred_io_aops; 171 + } 172 + EXPORT_SYMBOL_GPL(fb_deferred_io_open); 176 173 177 174 void fb_deferred_io_cleanup(struct fb_info *info) 178 175 {
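For context: fb_defio collects dirty framebuffer pages via ->page_mkwrite, and the address_space_operations added here give those driver-allocated pages their own set_page_dirty so the core VM does not fall back on aops the framebuffer mapping never had; fb_deferred_io_open() installs them per opener (wired up in fbmem.c below). A hedged sketch of how a driver opts in, with the callback name and delay chosen only for illustration:

#include <linux/fb.h>

/* Illustrative only: my_fb_dirty() and the HZ/10 delay are made up;
 * struct fb_deferred_io and the init/cleanup helpers are the real API
 * touched in this hunk. */
static void my_fb_dirty(struct fb_info *info, struct list_head *pagelist)
{
	/* push the pages collected since the last timer tick out to the
	 * device (USB, SPI, ...) */
}

static struct fb_deferred_io my_defio = {
	.delay		= HZ / 10,	/* coalesce writes for ~100ms */
	.deferred_io	= my_fb_dirty,
};

static void my_fb_enable_defio(struct fb_info *info)
{
	info->fbdefio = &my_defio;
	fb_deferred_io_init(info);	/* probe path */
}

static void my_fb_disable_defio(struct fb_info *info)
{
	fb_deferred_io_cleanup(info);	/* remove path */
}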
+4
drivers/video/fbmem.c
··· 1344 1344 if (res) 1345 1345 module_put(info->fbops->owner); 1346 1346 } 1347 + #ifdef CONFIG_FB_DEFERRED_IO 1348 + if (info->fbdefio) 1349 + fb_deferred_io_open(info, inode, file); 1350 + #endif 1347 1351 out: 1348 1352 unlock_kernel(); 1349 1353 return res;
+1
drivers/video/pm2fb.c
··· 1746 1746 release_mem_region(fix->mmio_start, fix->mmio_len); 1747 1747 1748 1748 pci_set_drvdata(pdev, NULL); 1749 + fb_dealloc_cmap(&info->cmap); 1749 1750 kfree(info->pixmap.addr); 1750 1751 kfree(info); 1751 1752 }
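The single added line pairs the fb_alloc_cmap() done at probe time with the fb_dealloc_cmap() that was missing from the removal path, closing a small leak. A minimal sketch of the matched pair, with hypothetical wrapper names; pm2fb itself already does the allocation, the hunk only adds the free:

static int my_probe_cmap(struct fb_info *info)
{
	/* allocate a 256-entry colormap when the device is probed */
	return fb_alloc_cmap(&info->cmap, 256, 0);
}

static void my_remove_cmap(struct fb_info *info)
{
	/* must be released on the remove path, which is what the added
	 * fb_dealloc_cmap() call restores */
	fb_dealloc_cmap(&info->cmap);
}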
-1
drivers/video/s3fb.c
··· 11 11 * which is based on the code of neofb. 12 12 */ 13 13 14 - #include <linux/version.h> 15 14 #include <linux/module.h> 16 15 #include <linux/kernel.h> 17 16 #include <linux/errno.h>
+2
drivers/video/sh_mobile_lcdcfb.c
··· 595 595 info->fbops = &sh_mobile_lcdc_ops; 596 596 info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres; 597 597 info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres; 598 + info->var.width = cfg->lcd_size_cfg.width; 599 + info->var.height = cfg->lcd_size_cfg.height; 598 600 info->var.activate = FB_ACTIVATE_NOW; 599 601 error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp); 600 602 if (error)
-1
drivers/video/vermilion/vermilion.h
··· 30 30 #define _VERMILION_H_ 31 31 32 32 #include <linux/kernel.h> 33 - #include <linux/version.h> 34 33 #include <linux/pci.h> 35 34 #include <asm/atomic.h> 36 35 #include <linux/mutex.h>
-1
drivers/video/vt8623fb.c
··· 12 12 * (http://davesdomain.org.uk/viafb/) 13 13 */ 14 14 15 - #include <linux/version.h> 16 15 #include <linux/module.h> 17 16 #include <linux/kernel.h> 18 17 #include <linux/errno.h>
-1
drivers/video/xilinxfb.c
··· 24 24 #include <linux/device.h> 25 25 #include <linux/module.h> 26 26 #include <linux/kernel.h> 27 - #include <linux/version.h> 28 27 #include <linux/errno.h> 29 28 #include <linux/string.h> 30 29 #include <linux/mm.h>
+3 -1
fs/binfmt_flat.c
··· 914 914 /* Stash our initial stack pointer into the mm structure */ 915 915 current->mm->start_stack = (unsigned long )sp; 916 916 917 - 917 + #ifdef FLAT_PLAT_INIT 918 + FLAT_PLAT_INIT(regs); 919 + #endif 918 920 DBG_FLT("start_thread(regs=0x%x, entry=0x%x, start_stack=0x%x)\n", 919 921 (int)regs, (int)start_addr, (int)current->mm->start_stack); 920 922
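FLAT_PLAT_INIT gives an architecture's asm/flat.h a hook to seed registers for the freshly loaded flat binary before start_thread(); which registers, if any, is entirely up to the port. A purely hypothetical example of such a definition, where the register name and the value passed are illustrative and not taken from any real port:

/* Hypothetical asm/flat.h fragment: hand the data-segment start to the
 * new program in a fixed register at exec time. */
#define FLAT_PLAT_INIT(regs) \
	do { (regs)->uregs[9] = current->mm->start_data; } while (0)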
+2 -2
fs/binfmt_misc.c
··· 120 120 if (bprm->misc_bang) 121 121 goto _ret; 122 122 123 - bprm->misc_bang = 1; 124 - 125 123 /* to keep locking time low, we copy the interpreter string */ 126 124 read_lock(&entries_lock); 127 125 fmt = check_file(bprm); ··· 196 198 197 199 if (retval < 0) 198 200 goto _error; 201 + 202 + bprm->misc_bang = 1; 199 203 200 204 retval = search_binary_handler (bprm, regs); 201 205 if (retval < 0)
+38 -46
fs/cramfs/inode.c
··· 43 43 static int cramfs_iget5_test(struct inode *inode, void *opaque) 44 44 { 45 45 struct cramfs_inode *cramfs_inode = opaque; 46 - 47 - if (inode->i_ino != CRAMINO(cramfs_inode)) 48 - return 0; /* does not match */ 49 - 50 - if (inode->i_ino != 1) 51 - return 1; 52 - 53 - /* all empty directories, char, block, pipe, and sock, share inode #1 */ 54 - 55 - if ((inode->i_mode != cramfs_inode->mode) || 56 - (inode->i_gid != cramfs_inode->gid) || 57 - (inode->i_uid != cramfs_inode->uid)) 58 - return 0; /* does not match */ 59 - 60 - if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) && 61 - (inode->i_rdev != old_decode_dev(cramfs_inode->size))) 62 - return 0; /* does not match */ 63 - 64 - return 1; /* matches */ 46 + return inode->i_ino == CRAMINO(cramfs_inode) && inode->i_ino != 1; 65 47 } 66 48 67 49 static int cramfs_iget5_set(struct inode *inode, void *opaque) 68 50 { 69 - static struct timespec zerotime; 70 51 struct cramfs_inode *cramfs_inode = opaque; 71 - inode->i_mode = cramfs_inode->mode; 72 - inode->i_uid = cramfs_inode->uid; 73 - inode->i_size = cramfs_inode->size; 74 - inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; 75 - inode->i_gid = cramfs_inode->gid; 76 - /* Struct copy intentional */ 77 - inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime; 78 52 inode->i_ino = CRAMINO(cramfs_inode); 79 - /* inode->i_nlink is left 1 - arguably wrong for directories, 80 - but it's the best we can do without reading the directory 81 - contents. 1 yields the right result in GNU find, even 82 - without -noleaf option. */ 83 - if (S_ISREG(inode->i_mode)) { 84 - inode->i_fop = &generic_ro_fops; 85 - inode->i_data.a_ops = &cramfs_aops; 86 - } else if (S_ISDIR(inode->i_mode)) { 87 - inode->i_op = &cramfs_dir_inode_operations; 88 - inode->i_fop = &cramfs_directory_operations; 89 - } else if (S_ISLNK(inode->i_mode)) { 90 - inode->i_op = &page_symlink_inode_operations; 91 - inode->i_data.a_ops = &cramfs_aops; 92 - } else { 93 - inode->i_size = 0; 94 - inode->i_blocks = 0; 95 - init_special_inode(inode, inode->i_mode, 96 - old_decode_dev(cramfs_inode->size)); 97 - } 98 53 return 0; 99 54 } 100 55 ··· 59 104 struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode), 60 105 cramfs_iget5_test, cramfs_iget5_set, 61 106 cramfs_inode); 107 + static struct timespec zerotime; 108 + 62 109 if (inode && (inode->i_state & I_NEW)) { 110 + inode->i_mode = cramfs_inode->mode; 111 + inode->i_uid = cramfs_inode->uid; 112 + inode->i_size = cramfs_inode->size; 113 + inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; 114 + inode->i_gid = cramfs_inode->gid; 115 + /* Struct copy intentional */ 116 + inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime; 117 + /* inode->i_nlink is left 1 - arguably wrong for directories, 118 + but it's the best we can do without reading the directory 119 + contents. 1 yields the right result in GNU find, even 120 + without -noleaf option. 
*/ 121 + if (S_ISREG(inode->i_mode)) { 122 + inode->i_fop = &generic_ro_fops; 123 + inode->i_data.a_ops = &cramfs_aops; 124 + } else if (S_ISDIR(inode->i_mode)) { 125 + inode->i_op = &cramfs_dir_inode_operations; 126 + inode->i_fop = &cramfs_directory_operations; 127 + } else if (S_ISLNK(inode->i_mode)) { 128 + inode->i_op = &page_symlink_inode_operations; 129 + inode->i_data.a_ops = &cramfs_aops; 130 + } else { 131 + inode->i_size = 0; 132 + inode->i_blocks = 0; 133 + init_special_inode(inode, inode->i_mode, 134 + old_decode_dev(cramfs_inode->size)); 135 + } 63 136 unlock_new_inode(inode); 64 137 } 65 138 return inode; 139 + } 140 + 141 + static void cramfs_drop_inode(struct inode *inode) 142 + { 143 + if (inode->i_ino == 1) 144 + generic_delete_inode(inode); 145 + else 146 + generic_drop_inode(inode); 66 147 } 67 148 68 149 /* ··· 525 534 .put_super = cramfs_put_super, 526 535 .remount_fs = cramfs_remount, 527 536 .statfs = cramfs_statfs, 537 + .drop_inode = cramfs_drop_inode, 528 538 }; 529 539 530 540 static int cramfs_get_sb(struct file_system_type *fs_type,
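The rework above moves all real initialisation out of the iget5 set() callback and into the I_NEW branch after iget5_locked(), and the new drop_inode method keeps the shared inode #1 (all empty directories and special files) from lingering in the cache with one user's mode/uid/gid. A hedged sketch of the generic iget5_locked() pattern the new code follows; the my_* names are illustrative, the VFS calls are real, and <linux/fs.h> plus <linux/err.h> are assumed:

static int my_test(struct inode *inode, void *opaque)
{
	return inode->i_ino == *(unsigned long *)opaque;
}

static int my_set(struct inode *inode, void *opaque)
{
	/* keep set() cheap: only the lookup key, as cramfs now does */
	inode->i_ino = *(unsigned long *)opaque;
	return 0;
}

static struct inode *my_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget5_locked(sb, ino, my_test, my_set, &ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* heavyweight setup happens only for a freshly hashed
		 * inode; unlock_new_inode() clears I_NEW and wakes any
		 * waiters */
		unlock_new_inode(inode);
	}
	return inode;
}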
+3
fs/ext4/balloc.c
··· 1626 1626 free_blocks = 1627 1627 percpu_counter_sum_and_set(&sbi->s_freeblocks_counter); 1628 1628 #endif 1629 + if (free_blocks <= root_blocks) 1630 + /* we don't have free space */ 1631 + return 0; 1629 1632 if (free_blocks - root_blocks < nblocks) 1630 1633 return free_blocks - root_blocks; 1631 1634 return nblocks;
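The added guard makes the "only reserved/root blocks left" case return 0 explicitly instead of falling through to the subtraction below, which could hand the caller a zero-or-negative count. A toy userspace restatement of the boundary, with the types simplified to signed long long:

#include <stdio.h>

/* Toy model of the check above: with free <= root the old code returned
 * the raw difference to its caller. */
static long long has_free(long long free_blocks, long long root_blocks,
			  long long nblocks)
{
	if (free_blocks <= root_blocks)
		return 0;				/* nothing usable */
	if (free_blocks - root_blocks < nblocks)
		return free_blocks - root_blocks;	/* partial */
	return nblocks;
}

int main(void)
{
	printf("%lld\n", has_free(90, 100, 8));		/* 0, not -10 */
	printf("%lld\n", has_free(105, 100, 8));	/* 5 */
	printf("%lld\n", has_free(200, 100, 8));	/* 8 */
	return 0;
}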
+15 -5
fs/ext4/dir.c
··· 411 411 get_dtype(sb, fname->file_type)); 412 412 if (error) { 413 413 filp->f_pos = curr_pos; 414 - info->extra_fname = fname->next; 414 + info->extra_fname = fname; 415 415 return error; 416 416 } 417 417 fname = fname->next; ··· 450 450 * If there are any leftover names on the hash collision 451 451 * chain, return them first. 452 452 */ 453 - if (info->extra_fname && 454 - call_filldir(filp, dirent, filldir, info->extra_fname)) 455 - goto finished; 453 + if (info->extra_fname) { 454 + if (call_filldir(filp, dirent, filldir, info->extra_fname)) 455 + goto finished; 456 456 457 - if (!info->curr_node) 457 + info->extra_fname = NULL; 458 + info->curr_node = rb_next(info->curr_node); 459 + if (!info->curr_node) { 460 + if (info->next_hash == ~0) { 461 + filp->f_pos = EXT4_HTREE_EOF; 462 + goto finished; 463 + } 464 + info->curr_hash = info->next_hash; 465 + info->curr_minor_hash = 0; 466 + } 467 + } else if (!info->curr_node) 458 468 info->curr_node = rb_first(&info->root); 459 469 460 470 while (1) {
+4
fs/ext4/ext4.h
··· 1072 1072 extern void ext4_get_inode_flags(struct ext4_inode_info *); 1073 1073 extern void ext4_set_aops(struct inode *inode); 1074 1074 extern int ext4_writepage_trans_blocks(struct inode *); 1075 + extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks); 1076 + extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); 1075 1077 extern int ext4_block_truncate_page(handle_t *handle, 1076 1078 struct address_space *mapping, loff_t from); 1077 1079 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page); ··· 1229 1227 /* extents.c */ 1230 1228 extern int ext4_ext_tree_init(handle_t *handle, struct inode *); 1231 1229 extern int ext4_ext_writepage_trans_blocks(struct inode *, int); 1230 + extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, 1231 + int chunk); 1232 1232 extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 1233 1233 ext4_lblk_t iblock, 1234 1234 unsigned long max_blocks, struct buffer_head *bh_result,
+3 -1
fs/ext4/ext4_extents.h
··· 216 216 extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *); 217 217 extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t); 218 218 extern int ext4_extent_tree_init(handle_t *, struct inode *); 219 - extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *); 219 + extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode, 220 + int num, 221 + struct ext4_ext_path *path); 220 222 extern int ext4_ext_try_to_merge(struct inode *inode, 221 223 struct ext4_ext_path *path, 222 224 struct ext4_extent *);
+8
fs/ext4/ext4_jbd2.h
··· 51 51 EXT4_XATTR_TRANS_BLOCKS - 2 + \ 52 52 2*EXT4_QUOTA_TRANS_BLOCKS(sb)) 53 53 54 + /* 55 + * Define the number of metadata blocks we need to account to modify data. 56 + * 57 + * This include super block, inode block, quota blocks and xattr blocks 58 + */ 59 + #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \ 60 + 2*EXT4_QUOTA_TRANS_BLOCKS(sb)) 61 + 54 62 /* Delete operations potentially hit one directory's namespace plus an 55 63 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be 56 64 * generous. We can grow the delete transaction later if necessary. */
+48 -65
fs/ext4/extents.c
··· 1747 1747 } 1748 1748 1749 1749 /* 1750 - * ext4_ext_calc_credits_for_insert: 1751 - * This routine returns max. credits that the extent tree can consume. 1752 - * It should be OK for low-performance paths like ->writepage() 1753 - * To allow many writing processes to fit into a single transaction, 1754 - * the caller should calculate credits under i_data_sem and 1755 - * pass the actual path. 1750 + * ext4_ext_calc_credits_for_single_extent: 1751 + * This routine returns max. credits that needed to insert an extent 1752 + * to the extent tree. 1753 + * When pass the actual path, the caller should calculate credits 1754 + * under i_data_sem. 1756 1755 */ 1757 - int ext4_ext_calc_credits_for_insert(struct inode *inode, 1756 + int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 1758 1757 struct ext4_ext_path *path) 1759 1758 { 1760 - int depth, needed; 1761 - 1762 1759 if (path) { 1760 + int depth = ext_depth(inode); 1761 + int ret = 0; 1762 + 1763 1763 /* probably there is space in leaf? */ 1764 - depth = ext_depth(inode); 1765 1764 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 1766 - < le16_to_cpu(path[depth].p_hdr->eh_max)) 1767 - return 1; 1765 + < le16_to_cpu(path[depth].p_hdr->eh_max)) { 1766 + 1767 + /* 1768 + * There are some space in the leaf tree, no 1769 + * need to account for leaf block credit 1770 + * 1771 + * bitmaps and block group descriptor blocks 1772 + * and other metadat blocks still need to be 1773 + * accounted. 1774 + */ 1775 + /* 1 bitmap, 1 block group descriptor */ 1776 + ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 1777 + } 1768 1778 } 1769 1779 1770 - /* 1771 - * given 32-bit logical block (4294967296 blocks), max. tree 1772 - * can be 4 levels in depth -- 4 * 340^4 == 53453440000. 1773 - * Let's also add one more level for imbalance. 1774 - */ 1775 - depth = 5; 1780 + return ext4_chunk_trans_blocks(inode, nrblocks); 1781 + } 1776 1782 1777 - /* allocation of new data block(s) */ 1778 - needed = 2; 1783 + /* 1784 + * How many index/leaf blocks need to change/allocate to modify nrblocks? 1785 + * 1786 + * if nrblocks are fit in a single extent (chunk flag is 1), then 1787 + * in the worse case, each tree level index/leaf need to be changed 1788 + * if the tree split due to insert a new extent, then the old tree 1789 + * index/leaf need to be updated too 1790 + * 1791 + * If the nrblocks are discontiguous, they could cause 1792 + * the whole tree split more than once, but this is really rare. 
1793 + */ 1794 + int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 1795 + { 1796 + int index; 1797 + int depth = ext_depth(inode); 1779 1798 1780 - /* 1781 - * tree can be full, so it would need to grow in depth: 1782 - * we need one credit to modify old root, credits for 1783 - * new root will be added in split accounting 1784 - */ 1785 - needed += 1; 1799 + if (chunk) 1800 + index = depth * 2; 1801 + else 1802 + index = depth * 3; 1786 1803 1787 - /* 1788 - * Index split can happen, we would need: 1789 - * allocate intermediate indexes (bitmap + group) 1790 - * + change two blocks at each level, but root (already included) 1791 - */ 1792 - needed += (depth * 2) + (depth * 2); 1793 - 1794 - /* any allocation modifies superblock */ 1795 - needed += 1; 1796 - 1797 - return needed; 1804 + return index; 1798 1805 } 1799 1806 1800 1807 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, ··· 1928 1921 correct_index = 1; 1929 1922 credits += (ext_depth(inode)) + 1; 1930 1923 } 1931 - #ifdef CONFIG_QUOTA 1932 1924 credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); 1933 - #endif 1934 1925 1935 1926 err = ext4_ext_journal_restart(handle, credits); 1936 1927 if (err) ··· 2810 2805 /* 2811 2806 * probably first extent we're gonna free will be last in block 2812 2807 */ 2813 - err = ext4_writepage_trans_blocks(inode) + 3; 2808 + err = ext4_writepage_trans_blocks(inode); 2814 2809 handle = ext4_journal_start(inode, err); 2815 2810 if (IS_ERR(handle)) 2816 2811 return; ··· 2824 2819 down_write(&EXT4_I(inode)->i_data_sem); 2825 2820 ext4_ext_invalidate_cache(inode); 2826 2821 2827 - ext4_mb_discard_inode_preallocations(inode); 2822 + ext4_discard_reservation(inode); 2828 2823 2829 2824 /* 2830 2825 * TODO: optimization is possible here. ··· 2861 2856 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 2862 2857 ext4_mark_inode_dirty(handle, inode); 2863 2858 ext4_journal_stop(handle); 2864 - } 2865 - 2866 - /* 2867 - * ext4_ext_writepage_trans_blocks: 2868 - * calculate max number of blocks we could modify 2869 - * in order to allocate new block for an inode 2870 - */ 2871 - int ext4_ext_writepage_trans_blocks(struct inode *inode, int num) 2872 - { 2873 - int needed; 2874 - 2875 - needed = ext4_ext_calc_credits_for_insert(inode, NULL); 2876 - 2877 - /* caller wants to allocate num blocks, but note it includes sb */ 2878 - needed = needed * num - (num - 1); 2879 - 2880 - #ifdef CONFIG_QUOTA 2881 - needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); 2882 - #endif 2883 - 2884 - return needed; 2885 2859 } 2886 2860 2887 2861 static void ext4_falloc_update_inode(struct inode *inode, ··· 2923 2939 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 2924 2940 - block; 2925 2941 /* 2926 - * credits to insert 1 extent into extent tree + buffers to be able to 2927 - * modify 1 super block, 1 block bitmap and 1 group descriptor. 2942 + * credits to insert 1 extent into extent tree 2928 2943 */ 2929 - credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3; 2944 + credits = ext4_chunk_trans_blocks(inode, max_blocks); 2930 2945 mutex_lock(&inode->i_mutex); 2931 2946 retry: 2932 2947 while (ret >= 0 && ret < max_blocks) {
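The index-block budget introduced above is charged per tree level: a single contiguous chunk pays for two blocks per level (the existing index/leaf plus the one a split may add), while discontiguous blocks pay for three. Restated as a small worked example:

/* e.g. for an extent tree of depth 3: one contiguous chunk is budgeted
 * 3 * 2 = 6 index/leaf blocks, a discontiguous set of blocks 3 * 3 = 9,
 * matching ext4_ext_index_trans_blocks() above. */
static int index_credits(int depth, int contiguous)
{
	return contiguous ? depth * 2 : depth * 3;
}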
+1 -1
fs/ext4/ialloc.c
··· 351 351 goto found_flexbg; 352 352 } 353 353 354 - if (best_flex < 0 || 354 + if (flex_group[best_flex].free_inodes == 0 || 355 355 (flex_group[i].free_blocks > 356 356 flex_group[best_flex].free_blocks && 357 357 flex_group[i].free_inodes))
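The corrected test prefers a candidate flex group whenever the current best has run out of free inodes, rather than relying on a "best_flex < 0" sentinel that cannot fire once best_flex already names a valid group. The replacement rule, pulled out as a standalone predicate for clarity (argument names are illustrative):

static int flex_candidate_is_better(unsigned best_free_inodes,
				    unsigned best_free_blocks,
				    unsigned cand_free_inodes,
				    unsigned cand_free_blocks)
{
	return best_free_inodes == 0 ||
	       (cand_free_blocks > best_free_blocks && cand_free_inodes);
}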
+326 -162
fs/ext4/inode.c
··· 41 41 #include "acl.h" 42 42 #include "ext4_extents.h" 43 43 44 + #define MPAGE_DA_EXTENT_TAIL 0x01 45 + 44 46 static inline int ext4_begin_ordered_truncate(struct inode *inode, 45 47 loff_t new_size) 46 48 { ··· 1007 1005 */ 1008 1006 static int ext4_calc_metadata_amount(struct inode *inode, int blocks) 1009 1007 { 1008 + if (!blocks) 1009 + return 0; 1010 + 1010 1011 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) 1011 1012 return ext4_ext_calc_metadata_amount(inode, blocks); 1012 1013 ··· 1045 1040 EXT4_I(inode)->i_allocated_meta_blocks = 0; 1046 1041 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1047 1042 } 1048 - 1049 - /* Maximum number of blocks we map for direct IO at once. */ 1050 - #define DIO_MAX_BLOCKS 4096 1051 - /* 1052 - * Number of credits we need for writing DIO_MAX_BLOCKS: 1053 - * We need sb + group descriptor + bitmap + inode -> 4 1054 - * For B blocks with A block pointers per block we need: 1055 - * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect). 1056 - * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25. 1057 - */ 1058 - #define DIO_CREDITS 25 1059 - 1060 1043 1061 1044 /* 1062 1045 * The ext4_get_blocks_wrap() function try to look up the requested blocks, ··· 1157 1164 return retval; 1158 1165 } 1159 1166 1167 + /* Maximum number of blocks we map for direct IO at once. */ 1168 + #define DIO_MAX_BLOCKS 4096 1169 + 1160 1170 static int ext4_get_block(struct inode *inode, sector_t iblock, 1161 1171 struct buffer_head *bh_result, int create) 1162 1172 { 1163 1173 handle_t *handle = ext4_journal_current_handle(); 1164 1174 int ret = 0, started = 0; 1165 1175 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 1176 + int dio_credits; 1166 1177 1167 1178 if (create && !handle) { 1168 1179 /* Direct IO write... */ 1169 1180 if (max_blocks > DIO_MAX_BLOCKS) 1170 1181 max_blocks = DIO_MAX_BLOCKS; 1171 - handle = ext4_journal_start(inode, DIO_CREDITS + 1172 - 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)); 1182 + dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); 1183 + handle = ext4_journal_start(inode, dio_credits); 1173 1184 if (IS_ERR(handle)) { 1174 1185 ret = PTR_ERR(handle); 1175 1186 goto out; ··· 1556 1559 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1557 1560 int total, mdb, mdb_free, release; 1558 1561 1562 + if (!to_free) 1563 + return; /* Nothing to release, exit */ 1564 + 1559 1565 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1566 + 1567 + if (!EXT4_I(inode)->i_reserved_data_blocks) { 1568 + /* 1569 + * if there is no reserved blocks, but we try to free some 1570 + * then the counter is messed up somewhere. 
1571 + * but since this function is called from invalidate 1572 + * page, it's harmless to return without any action 1573 + */ 1574 + printk(KERN_INFO "ext4 delalloc try to release %d reserved " 1575 + "blocks for inode %lu, but there is no reserved " 1576 + "data blocks\n", to_free, inode->i_ino); 1577 + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1578 + return; 1579 + } 1580 + 1560 1581 /* recalculate the number of metablocks still need to be reserved */ 1561 1582 total = EXT4_I(inode)->i_reserved_data_blocks - to_free; 1562 1583 mdb = ext4_calc_metadata_amount(inode, total); ··· 1628 1613 unsigned long first_page, next_page; /* extent of pages */ 1629 1614 get_block_t *get_block; 1630 1615 struct writeback_control *wbc; 1616 + int io_done; 1617 + long pages_written; 1631 1618 }; 1632 1619 1633 1620 /* 1634 1621 * mpage_da_submit_io - walks through extent of pages and try to write 1635 - * them with __mpage_writepage() 1622 + * them with writepage() call back 1636 1623 * 1637 1624 * @mpd->inode: inode 1638 1625 * @mpd->first_page: first page of the extent ··· 1649 1632 static int mpage_da_submit_io(struct mpage_da_data *mpd) 1650 1633 { 1651 1634 struct address_space *mapping = mpd->inode->i_mapping; 1652 - struct mpage_data mpd_pp = { 1653 - .bio = NULL, 1654 - .last_block_in_bio = 0, 1655 - .get_block = mpd->get_block, 1656 - .use_writepage = 1, 1657 - }; 1658 1635 int ret = 0, err, nr_pages, i; 1659 1636 unsigned long index, end; 1660 1637 struct pagevec pvec; 1661 1638 1662 1639 BUG_ON(mpd->next_page <= mpd->first_page); 1663 - 1664 1640 pagevec_init(&pvec, 0); 1665 1641 index = mpd->first_page; 1666 1642 end = mpd->next_page - 1; ··· 1671 1661 break; 1672 1662 index++; 1673 1663 1674 - err = __mpage_writepage(page, mpd->wbc, &mpd_pp); 1675 - 1664 + err = mapping->a_ops->writepage(page, mpd->wbc); 1665 + if (!err) 1666 + mpd->pages_written++; 1676 1667 /* 1677 1668 * In error case, we have to continue because 1678 1669 * remaining pages are still locked ··· 1684 1673 } 1685 1674 pagevec_release(&pvec); 1686 1675 } 1687 - if (mpd_pp.bio) 1688 - mpage_bio_submit(WRITE, mpd_pp.bio); 1689 - 1690 1676 return ret; 1691 1677 } 1692 1678 ··· 1706 1698 int blocks = exbh->b_size >> inode->i_blkbits; 1707 1699 sector_t pblock = exbh->b_blocknr, cur_logical; 1708 1700 struct buffer_head *head, *bh; 1709 - unsigned long index, end; 1701 + pgoff_t index, end; 1710 1702 struct pagevec pvec; 1711 1703 int nr_pages, i; 1712 1704 ··· 1749 1741 if (buffer_delay(bh)) { 1750 1742 bh->b_blocknr = pblock; 1751 1743 clear_buffer_delay(bh); 1744 + bh->b_bdev = inode->i_sb->s_bdev; 1745 + } else if (buffer_unwritten(bh)) { 1746 + bh->b_blocknr = pblock; 1747 + clear_buffer_unwritten(bh); 1748 + set_buffer_mapped(bh); 1749 + set_buffer_new(bh); 1750 + bh->b_bdev = inode->i_sb->s_bdev; 1752 1751 } else if (buffer_mapped(bh)) 1753 1752 BUG_ON(bh->b_blocknr != pblock); 1754 1753 ··· 1791 1776 * 1792 1777 * The function skips space we know is already mapped to disk blocks. 
1793 1778 * 1794 - * The function ignores errors ->get_block() returns, thus real 1795 - * error handling is postponed to __mpage_writepage() 1796 1779 */ 1797 1780 static void mpage_da_map_blocks(struct mpage_da_data *mpd) 1798 1781 { 1782 + int err = 0; 1799 1783 struct buffer_head *lbh = &mpd->lbh; 1800 - int err = 0, remain = lbh->b_size; 1801 1784 sector_t next = lbh->b_blocknr; 1802 1785 struct buffer_head new; 1803 1786 ··· 1805 1792 if (buffer_mapped(lbh) && !buffer_delay(lbh)) 1806 1793 return; 1807 1794 1808 - while (remain) { 1809 - new.b_state = lbh->b_state; 1810 - new.b_blocknr = 0; 1811 - new.b_size = remain; 1812 - err = mpd->get_block(mpd->inode, next, &new, 1); 1813 - if (err) { 1814 - /* 1815 - * Rather than implement own error handling 1816 - * here, we just leave remaining blocks 1817 - * unallocated and try again with ->writepage() 1818 - */ 1819 - break; 1820 - } 1821 - BUG_ON(new.b_size == 0); 1795 + new.b_state = lbh->b_state; 1796 + new.b_blocknr = 0; 1797 + new.b_size = lbh->b_size; 1822 1798 1823 - if (buffer_new(&new)) 1824 - __unmap_underlying_blocks(mpd->inode, &new); 1799 + /* 1800 + * If we didn't accumulate anything 1801 + * to write simply return 1802 + */ 1803 + if (!new.b_size) 1804 + return; 1805 + err = mpd->get_block(mpd->inode, next, &new, 1); 1806 + if (err) 1807 + return; 1808 + BUG_ON(new.b_size == 0); 1825 1809 1826 - /* 1827 - * If blocks are delayed marked, we need to 1828 - * put actual blocknr and drop delayed bit 1829 - */ 1830 - if (buffer_delay(lbh)) 1831 - mpage_put_bnr_to_bhs(mpd, next, &new); 1810 + if (buffer_new(&new)) 1811 + __unmap_underlying_blocks(mpd->inode, &new); 1832 1812 1833 - /* go for the remaining blocks */ 1834 - next += new.b_size >> mpd->inode->i_blkbits; 1835 - remain -= new.b_size; 1836 - } 1813 + /* 1814 + * If blocks are delayed marked, we need to 1815 + * put actual blocknr and drop delayed bit 1816 + */ 1817 + if (buffer_delay(lbh) || buffer_unwritten(lbh)) 1818 + mpage_put_bnr_to_bhs(mpd, next, &new); 1819 + 1820 + return; 1837 1821 } 1838 1822 1839 - #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | (1 << BH_Delay)) 1823 + #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1824 + (1 << BH_Delay) | (1 << BH_Unwritten)) 1840 1825 1841 1826 /* 1842 1827 * mpage_add_bh_to_extent - try to add one more block to extent of blocks ··· 1848 1837 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 1849 1838 sector_t logical, struct buffer_head *bh) 1850 1839 { 1851 - struct buffer_head *lbh = &mpd->lbh; 1852 1840 sector_t next; 1841 + size_t b_size = bh->b_size; 1842 + struct buffer_head *lbh = &mpd->lbh; 1843 + int nrblocks = lbh->b_size >> mpd->inode->i_blkbits; 1853 1844 1854 - next = lbh->b_blocknr + (lbh->b_size >> mpd->inode->i_blkbits); 1855 - 1845 + /* check if thereserved journal credits might overflow */ 1846 + if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { 1847 + if (nrblocks >= EXT4_MAX_TRANS_DATA) { 1848 + /* 1849 + * With non-extent format we are limited by the journal 1850 + * credit available. Total credit needed to insert 1851 + * nrblocks contiguous blocks is dependent on the 1852 + * nrblocks. So limit nrblocks. 1853 + */ 1854 + goto flush_it; 1855 + } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 1856 + EXT4_MAX_TRANS_DATA) { 1857 + /* 1858 + * Adding the new buffer_head would make it cross the 1859 + * allowed limit for which we have journal credit 1860 + * reserved. 
So limit the new bh->b_size 1861 + */ 1862 + b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 1863 + mpd->inode->i_blkbits; 1864 + /* we will do mpage_da_submit_io in the next loop */ 1865 + } 1866 + } 1856 1867 /* 1857 1868 * First block in the extent 1858 1869 */ 1859 1870 if (lbh->b_size == 0) { 1860 1871 lbh->b_blocknr = logical; 1861 - lbh->b_size = bh->b_size; 1872 + lbh->b_size = b_size; 1862 1873 lbh->b_state = bh->b_state & BH_FLAGS; 1863 1874 return; 1864 1875 } 1865 1876 1877 + next = lbh->b_blocknr + nrblocks; 1866 1878 /* 1867 1879 * Can we merge the block to our big extent? 1868 1880 */ 1869 1881 if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) { 1870 - lbh->b_size += bh->b_size; 1882 + lbh->b_size += b_size; 1871 1883 return; 1872 1884 } 1873 1885 1886 + flush_it: 1874 1887 /* 1875 1888 * We couldn't merge the block to our extent, so we 1876 1889 * need to flush current extent and start new one 1877 1890 */ 1878 1891 mpage_da_map_blocks(mpd); 1879 - 1880 - /* 1881 - * Now start a new extent 1882 - */ 1883 - lbh->b_size = bh->b_size; 1884 - lbh->b_state = bh->b_state & BH_FLAGS; 1885 - lbh->b_blocknr = logical; 1892 + mpage_da_submit_io(mpd); 1893 + mpd->io_done = 1; 1894 + return; 1886 1895 } 1887 1896 1888 1897 /* ··· 1922 1891 struct buffer_head *bh, *head, fake; 1923 1892 sector_t logical; 1924 1893 1894 + if (mpd->io_done) { 1895 + /* 1896 + * Rest of the page in the page_vec 1897 + * redirty then and skip then. We will 1898 + * try to to write them again after 1899 + * starting a new transaction 1900 + */ 1901 + redirty_page_for_writepage(wbc, page); 1902 + unlock_page(page); 1903 + return MPAGE_DA_EXTENT_TAIL; 1904 + } 1925 1905 /* 1926 1906 * Can we merge this page to current extent? 1927 1907 */ 1928 1908 if (mpd->next_page != page->index) { 1929 1909 /* 1930 1910 * Nope, we can't. So, we map non-allocated blocks 1931 - * and start IO on them using __mpage_writepage() 1911 + * and start IO on them using writepage() 1932 1912 */ 1933 1913 if (mpd->next_page != mpd->first_page) { 1934 1914 mpage_da_map_blocks(mpd); 1935 1915 mpage_da_submit_io(mpd); 1916 + /* 1917 + * skip rest of the page in the page_vec 1918 + */ 1919 + mpd->io_done = 1; 1920 + redirty_page_for_writepage(wbc, page); 1921 + unlock_page(page); 1922 + return MPAGE_DA_EXTENT_TAIL; 1936 1923 } 1937 1924 1938 1925 /* ··· 1981 1932 set_buffer_dirty(bh); 1982 1933 set_buffer_uptodate(bh); 1983 1934 mpage_add_bh_to_extent(mpd, logical, bh); 1935 + if (mpd->io_done) 1936 + return MPAGE_DA_EXTENT_TAIL; 1984 1937 } else { 1985 1938 /* 1986 1939 * Page with regular buffer heads, just add all dirty ones ··· 1991 1940 bh = head; 1992 1941 do { 1993 1942 BUG_ON(buffer_locked(bh)); 1994 - if (buffer_dirty(bh)) 1943 + if (buffer_dirty(bh) && 1944 + (!buffer_mapped(bh) || buffer_delay(bh))) { 1995 1945 mpage_add_bh_to_extent(mpd, logical, bh); 1946 + if (mpd->io_done) 1947 + return MPAGE_DA_EXTENT_TAIL; 1948 + } 1996 1949 logical++; 1997 1950 } while ((bh = bh->b_this_page) != head); 1998 1951 } ··· 2015 1960 * 2016 1961 * This is a library function, which implements the writepages() 2017 1962 * address_space_operation. 
2018 - * 2019 - * In order to avoid duplication of logic that deals with partial pages, 2020 - * multiple bio per page, etc, we find non-allocated blocks, allocate 2021 - * them with minimal calls to ->get_block() and re-use __mpage_writepage() 2022 - * 2023 - * It's important that we call __mpage_writepage() only once for each 2024 - * involved page, otherwise we'd have to implement more complicated logic 2025 - * to deal with pages w/o PG_lock or w/ PG_writeback and so on. 2026 - * 2027 - * See comments to mpage_writepages() 2028 1963 */ 2029 1964 static int mpage_da_writepages(struct address_space *mapping, 2030 1965 struct writeback_control *wbc, 2031 1966 get_block_t get_block) 2032 1967 { 2033 1968 struct mpage_da_data mpd; 1969 + long to_write; 2034 1970 int ret; 2035 1971 2036 1972 if (!get_block) ··· 2035 1989 mpd.first_page = 0; 2036 1990 mpd.next_page = 0; 2037 1991 mpd.get_block = get_block; 1992 + mpd.io_done = 0; 1993 + mpd.pages_written = 0; 1994 + 1995 + to_write = wbc->nr_to_write; 2038 1996 2039 1997 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd); 2040 1998 2041 1999 /* 2042 2000 * Handle last extent of pages 2043 2001 */ 2044 - if (mpd.next_page != mpd.first_page) { 2002 + if (!mpd.io_done && mpd.next_page != mpd.first_page) { 2045 2003 mpage_da_map_blocks(&mpd); 2046 2004 mpage_da_submit_io(&mpd); 2047 2005 } 2048 2006 2007 + wbc->nr_to_write = to_write - mpd.pages_written; 2049 2008 return ret; 2050 2009 } 2051 2010 ··· 2255 2204 } 2256 2205 2257 2206 /* 2258 - * For now just follow the DIO way to estimate the max credits 2259 - * needed to write out EXT4_MAX_WRITEBACK_PAGES. 2260 - * todo: need to calculate the max credits need for 2261 - * extent based files, currently the DIO credits is based on 2262 - * indirect-blocks mapping way. 2263 - * 2264 - * Probably should have a generic way to calculate credits 2265 - * for DIO, writepages, and truncate 2207 + * This is called via ext4_da_writepages() to 2208 + * calulate the total number of credits to reserve to fit 2209 + * a single extent allocation into a single transaction, 2210 + * ext4_da_writpeages() will loop calling this before 2211 + * the block allocation. 2266 2212 */ 2267 - #define EXT4_MAX_WRITEBACK_PAGES DIO_MAX_BLOCKS 2268 - #define EXT4_MAX_WRITEBACK_CREDITS DIO_CREDITS 2213 + 2214 + static int ext4_da_writepages_trans_blocks(struct inode *inode) 2215 + { 2216 + int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2217 + 2218 + /* 2219 + * With non-extent format the journal credit needed to 2220 + * insert nrblocks contiguous block is dependent on 2221 + * number of contiguous block. So we will limit 2222 + * number of contiguous block to a sane value 2223 + */ 2224 + if (!(inode->i_flags & EXT4_EXTENTS_FL) && 2225 + (max_blocks > EXT4_MAX_TRANS_DATA)) 2226 + max_blocks = EXT4_MAX_TRANS_DATA; 2227 + 2228 + return ext4_chunk_trans_blocks(inode, max_blocks); 2229 + } 2269 2230 2270 2231 static int ext4_da_writepages(struct address_space *mapping, 2271 - struct writeback_control *wbc) 2232 + struct writeback_control *wbc) 2272 2233 { 2273 - struct inode *inode = mapping->host; 2274 2234 handle_t *handle = NULL; 2275 - int needed_blocks; 2276 - int ret = 0; 2277 - long to_write; 2278 2235 loff_t range_start = 0; 2236 + struct inode *inode = mapping->host; 2237 + int needed_blocks, ret = 0, nr_to_writebump = 0; 2238 + long to_write, pages_skipped = 0; 2239 + struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2279 2240 2280 2241 /* 2281 2242 * No pages to write? 
This is mainly a kludge to avoid starting 2282 2243 * a transaction for special inodes like journal inode on last iput() 2283 2244 * because that could violate lock ordering on umount 2284 2245 */ 2285 - if (!mapping->nrpages) 2246 + if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2286 2247 return 0; 2287 - 2288 2248 /* 2289 - * Estimate the worse case needed credits to write out 2290 - * EXT4_MAX_BUF_BLOCKS pages 2249 + * Make sure nr_to_write is >= sbi->s_mb_stream_request 2250 + * This make sure small files blocks are allocated in 2251 + * single attempt. This ensure that small files 2252 + * get less fragmented. 2291 2253 */ 2292 - needed_blocks = EXT4_MAX_WRITEBACK_CREDITS; 2254 + if (wbc->nr_to_write < sbi->s_mb_stream_request) { 2255 + nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write; 2256 + wbc->nr_to_write = sbi->s_mb_stream_request; 2257 + } 2293 2258 2294 - to_write = wbc->nr_to_write; 2295 - if (!wbc->range_cyclic) { 2259 + if (!wbc->range_cyclic) 2296 2260 /* 2297 2261 * If range_cyclic is not set force range_cont 2298 2262 * and save the old writeback_index 2299 2263 */ 2300 2264 wbc->range_cont = 1; 2301 - range_start = wbc->range_start; 2302 - } 2303 2265 2304 - while (!ret && to_write) { 2266 + range_start = wbc->range_start; 2267 + pages_skipped = wbc->pages_skipped; 2268 + 2269 + restart_loop: 2270 + to_write = wbc->nr_to_write; 2271 + while (!ret && to_write > 0) { 2272 + 2273 + /* 2274 + * we insert one extent at a time. So we need 2275 + * credit needed for single extent allocation. 2276 + * journalled mode is currently not supported 2277 + * by delalloc 2278 + */ 2279 + BUG_ON(ext4_should_journal_data(inode)); 2280 + needed_blocks = ext4_da_writepages_trans_blocks(inode); 2281 + 2305 2282 /* start a new transaction*/ 2306 2283 handle = ext4_journal_start(inode, needed_blocks); 2307 2284 if (IS_ERR(handle)) { 2308 2285 ret = PTR_ERR(handle); 2286 + printk(KERN_EMERG "%s: jbd2_start: " 2287 + "%ld pages, ino %lu; err %d\n", __func__, 2288 + wbc->nr_to_write, inode->i_ino, ret); 2289 + dump_stack(); 2309 2290 goto out_writepages; 2310 2291 } 2311 2292 if (ext4_should_order_data(inode)) { 2312 2293 /* 2313 2294 * With ordered mode we need to add 2314 - * the inode to the journal handle 2295 + * the inode to the journal handl 2315 2296 * when we do block allocation. 
2316 2297 */ 2317 2298 ret = ext4_jbd2_file_inode(handle, inode); ··· 2351 2268 ext4_journal_stop(handle); 2352 2269 goto out_writepages; 2353 2270 } 2354 - 2355 2271 } 2356 - /* 2357 - * set the max dirty pages could be write at a time 2358 - * to fit into the reserved transaction credits 2359 - */ 2360 - if (wbc->nr_to_write > EXT4_MAX_WRITEBACK_PAGES) 2361 - wbc->nr_to_write = EXT4_MAX_WRITEBACK_PAGES; 2362 2272 2363 2273 to_write -= wbc->nr_to_write; 2364 2274 ret = mpage_da_writepages(mapping, wbc, 2365 - ext4_da_get_block_write); 2275 + ext4_da_get_block_write); 2366 2276 ext4_journal_stop(handle); 2367 - if (wbc->nr_to_write) { 2277 + if (ret == MPAGE_DA_EXTENT_TAIL) { 2278 + /* 2279 + * got one extent now try with 2280 + * rest of the pages 2281 + */ 2282 + to_write += wbc->nr_to_write; 2283 + ret = 0; 2284 + } else if (wbc->nr_to_write) { 2368 2285 /* 2369 2286 * There is no more writeout needed 2370 2287 * or we requested for a noblocking writeout ··· 2376 2293 wbc->nr_to_write = to_write; 2377 2294 } 2378 2295 2379 - out_writepages: 2380 - wbc->nr_to_write = to_write; 2381 - if (range_start) 2296 + if (wbc->range_cont && (pages_skipped != wbc->pages_skipped)) { 2297 + /* We skipped pages in this loop */ 2382 2298 wbc->range_start = range_start; 2299 + wbc->nr_to_write = to_write + 2300 + wbc->pages_skipped - pages_skipped; 2301 + wbc->pages_skipped = pages_skipped; 2302 + goto restart_loop; 2303 + } 2304 + 2305 + out_writepages: 2306 + wbc->nr_to_write = to_write - nr_to_writebump; 2307 + wbc->range_start = range_start; 2383 2308 return ret; 2384 2309 } 2385 2310 ··· 3577 3486 * modify the block allocation tree. 3578 3487 */ 3579 3488 down_write(&ei->i_data_sem); 3489 + 3490 + ext4_discard_reservation(inode); 3491 + 3580 3492 /* 3581 3493 * The orphan list entry will now protect us from any crash which 3582 3494 * occurs before the truncate completes, so it is now safe to propagate ··· 3648 3554 case EXT4_TIND_BLOCK: 3649 3555 ; 3650 3556 } 3651 - 3652 - ext4_discard_reservation(inode); 3653 3557 3654 3558 up_write(&ei->i_data_sem); 3655 3559 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ··· 4416 4324 return 0; 4417 4325 } 4418 4326 4419 - /* 4420 - * How many blocks doth make a writepage()? 4421 - * 4422 - * With N blocks per page, it may be: 4423 - * N data blocks 4424 - * 2 indirect block 4425 - * 2 dindirect 4426 - * 1 tindirect 4427 - * N+5 bitmap blocks (from the above) 4428 - * N+5 group descriptor summary blocks 4429 - * 1 inode block 4430 - * 1 superblock. 4431 - * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quote files 4432 - * 4433 - * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS 4434 - * 4435 - * With ordered or writeback data it's the same, less the N data blocks. 4436 - * 4437 - * If the inode's direct blocks can hold an integral number of pages then a 4438 - * page cannot straddle two indirect blocks, and we can only touch one indirect 4439 - * and dindirect block, and the "5" above becomes "3". 4440 - * 4441 - * This still overestimates under most circumstances. If we were to pass the 4442 - * start and end offsets in here as well we could do block_to_path() on each 4443 - * block and work out the exact number of indirects which are touched. Pah. 
4444 - */ 4327 + static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, 4328 + int chunk) 4329 + { 4330 + int indirects; 4445 4331 4332 + /* if nrblocks are contiguous */ 4333 + if (chunk) { 4334 + /* 4335 + * With N contiguous data blocks, it need at most 4336 + * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks 4337 + * 2 dindirect blocks 4338 + * 1 tindirect block 4339 + */ 4340 + indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); 4341 + return indirects + 3; 4342 + } 4343 + /* 4344 + * if nrblocks are not contiguous, worse case, each block touch 4345 + * a indirect block, and each indirect block touch a double indirect 4346 + * block, plus a triple indirect block 4347 + */ 4348 + indirects = nrblocks * 2 + 1; 4349 + return indirects; 4350 + } 4351 + 4352 + static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4353 + { 4354 + if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 4355 + return ext4_indirect_trans_blocks(inode, nrblocks, 0); 4356 + return ext4_ext_index_trans_blocks(inode, nrblocks, 0); 4357 + } 4358 + /* 4359 + * Account for index blocks, block groups bitmaps and block group 4360 + * descriptor blocks if modify datablocks and index blocks 4361 + * worse case, the indexs blocks spread over different block groups 4362 + * 4363 + * If datablocks are discontiguous, they are possible to spread over 4364 + * different block groups too. If they are contiugous, with flexbg, 4365 + * they could still across block group boundary. 4366 + * 4367 + * Also account for superblock, inode, quota and xattr blocks 4368 + */ 4369 + int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4370 + { 4371 + int groups, gdpblocks; 4372 + int idxblocks; 4373 + int ret = 0; 4374 + 4375 + /* 4376 + * How many index blocks need to touch to modify nrblocks? 4377 + * The "Chunk" flag indicating whether the nrblocks is 4378 + * physically contiguous on disk 4379 + * 4380 + * For Direct IO and fallocate, they calls get_block to allocate 4381 + * one single extent at a time, so they could set the "Chunk" flag 4382 + */ 4383 + idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); 4384 + 4385 + ret = idxblocks; 4386 + 4387 + /* 4388 + * Now let's see how many group bitmaps and group descriptors need 4389 + * to account 4390 + */ 4391 + groups = idxblocks; 4392 + if (chunk) 4393 + groups += 1; 4394 + else 4395 + groups += nrblocks; 4396 + 4397 + gdpblocks = groups; 4398 + if (groups > EXT4_SB(inode->i_sb)->s_groups_count) 4399 + groups = EXT4_SB(inode->i_sb)->s_groups_count; 4400 + if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 4401 + gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 4402 + 4403 + /* bitmaps and block group descriptor blocks */ 4404 + ret += groups + gdpblocks; 4405 + 4406 + /* Blocks for super block, inode, quota and xattr blocks */ 4407 + ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 4408 + 4409 + return ret; 4410 + } 4411 + 4412 + /* 4413 + * Calulate the total number of credits to reserve to fit 4414 + * the modification of a single pages into a single transaction, 4415 + * which may include multiple chunks of block allocations. 4416 + * 4417 + * This could be called via ext4_write_begin() 4418 + * 4419 + * We need to consider the worse case, when 4420 + * one new block per extent. 4421 + */ 4446 4422 int ext4_writepage_trans_blocks(struct inode *inode) 4447 4423 { 4448 4424 int bpp = ext4_journal_blocks_per_page(inode); 4449 - int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 
5 : 3; 4450 4425 int ret; 4451 4426 4452 - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) 4453 - return ext4_ext_writepage_trans_blocks(inode, bpp); 4427 + ret = ext4_meta_trans_blocks(inode, bpp, 0); 4454 4428 4429 + /* Account for data blocks for journalled mode */ 4455 4430 if (ext4_should_journal_data(inode)) 4456 - ret = 3 * (bpp + indirects) + 2; 4457 - else 4458 - ret = 2 * (bpp + indirects) + 2; 4459 - 4460 - #ifdef CONFIG_QUOTA 4461 - /* We know that structure was already allocated during DQUOT_INIT so 4462 - * we will be updating only the data blocks + inodes */ 4463 - ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); 4464 - #endif 4465 - 4431 + ret += bpp; 4466 4432 return ret; 4433 + } 4434 + 4435 + /* 4436 + * Calculate the journal credits for a chunk of data modification. 4437 + * 4438 + * This is called from DIO, fallocate or whoever calling 4439 + * ext4_get_blocks_wrap() to map/allocate a chunk of contigous disk blocks. 4440 + * 4441 + * journal buffers for data blocks are not included here, as DIO 4442 + * and fallocate do no need to journal data buffers. 4443 + */ 4444 + int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 4445 + { 4446 + return ext4_meta_trans_blocks(inode, nrblocks, 1); 4467 4447 } 4468 4448 4469 4449 /*
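A quick way to follow the credit accounting added above is a small userspace sketch (not kernel code). The geometry numbers below (blocks addressed per indirect block, group count, group-descriptor block count, and the fixed superblock/inode/quota/xattr overhead) are made-up placeholders; only the arithmetic mirrors ext4_meta_trans_blocks().

#include <stdio.h>

/* Userspace sketch of the worst-case credit estimate for an
 * indirect-block-mapped file; all geometry values are illustrative. */
static int index_blocks(int nrblocks, int chunk, int addr_per_block)
{
        if (chunk)      /* contiguous run: N/addr_per_block indirects + 2 dindirect + 1 tindirect */
                return nrblocks / addr_per_block + 3;
        /* discontiguous worst case: one indirect + one dindirect per block, one tindirect */
        return nrblocks * 2 + 1;
}

static int meta_trans_blocks(int nrblocks, int chunk, int addr_per_block,
                             int groups_count, int gdb_count, int fixed_overhead)
{
        int idxblocks = index_blocks(nrblocks, chunk, addr_per_block);
        int groups = idxblocks + (chunk ? 1 : nrblocks);
        int gdpblocks = groups;

        if (groups > groups_count)
                groups = groups_count;
        if (groups > gdb_count)
                gdpblocks = gdb_count;

        /* index blocks + bitmaps + group descriptors + sb/inode/quota/xattr */
        return idxblocks + groups + gdpblocks + fixed_overhead;
}

int main(void)
{
        /* one contiguous 16-block chunk vs. 16 scattered blocks */
        printf("contiguous   : %d credits\n",
               meta_trans_blocks(16, 1, 1024, 128, 4, 8));
        printf("discontiguous: %d credits\n",
               meta_trans_blocks(16, 0, 1024, 128, 4, 8));
        return 0;
}

With this toy geometry the contiguous chunk needs 19 credits and the scattered blocks 94, which is why the "chunk" flag is worth passing down from DIO/fallocate.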
+46 -7
fs/ext4/mballoc.c
··· 3282 3282 } 3283 3283 3284 3284 /* 3285 + * Return the prealloc space that have minimal distance 3286 + * from the goal block. @cpa is the prealloc 3287 + * space that is having currently known minimal distance 3288 + * from the goal block. 3289 + */ 3290 + static struct ext4_prealloc_space * 3291 + ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3292 + struct ext4_prealloc_space *pa, 3293 + struct ext4_prealloc_space *cpa) 3294 + { 3295 + ext4_fsblk_t cur_distance, new_distance; 3296 + 3297 + if (cpa == NULL) { 3298 + atomic_inc(&pa->pa_count); 3299 + return pa; 3300 + } 3301 + cur_distance = abs(goal_block - cpa->pa_pstart); 3302 + new_distance = abs(goal_block - pa->pa_pstart); 3303 + 3304 + if (cur_distance < new_distance) 3305 + return cpa; 3306 + 3307 + /* drop the previous reference */ 3308 + atomic_dec(&cpa->pa_count); 3309 + atomic_inc(&pa->pa_count); 3310 + return pa; 3311 + } 3312 + 3313 + /* 3285 3314 * search goal blocks in preallocated space 3286 3315 */ 3287 3316 static noinline_for_stack int ··· 3319 3290 int order, i; 3320 3291 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3321 3292 struct ext4_locality_group *lg; 3322 - struct ext4_prealloc_space *pa; 3293 + struct ext4_prealloc_space *pa, *cpa = NULL; 3294 + ext4_fsblk_t goal_block; 3323 3295 3324 3296 /* only data can be preallocated */ 3325 3297 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) ··· 3363 3333 /* The max size of hash table is PREALLOC_TB_SIZE */ 3364 3334 order = PREALLOC_TB_SIZE - 1; 3365 3335 3336 + goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) + 3337 + ac->ac_g_ex.fe_start + 3338 + le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block); 3339 + /* 3340 + * search for the prealloc space that is having 3341 + * minimal distance from the goal block. 3342 + */ 3366 3343 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3367 3344 rcu_read_lock(); 3368 3345 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], ··· 3377 3340 spin_lock(&pa->pa_lock); 3378 3341 if (pa->pa_deleted == 0 && 3379 3342 pa->pa_free >= ac->ac_o_ex.fe_len) { 3380 - atomic_inc(&pa->pa_count); 3381 - ext4_mb_use_group_pa(ac, pa); 3382 - spin_unlock(&pa->pa_lock); 3383 - ac->ac_criteria = 20; 3384 - rcu_read_unlock(); 3385 - return 1; 3343 + 3344 + cpa = ext4_mb_check_group_pa(goal_block, 3345 + pa, cpa); 3386 3346 } 3387 3347 spin_unlock(&pa->pa_lock); 3388 3348 } 3389 3349 rcu_read_unlock(); 3350 + } 3351 + if (cpa) { 3352 + ext4_mb_use_group_pa(ac, cpa); 3353 + ac->ac_criteria = 20; 3354 + return 1; 3390 3355 } 3391 3356 return 0; 3392 3357 }
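The new group-preallocation search above boils down to a nearest-candidate scan. A minimal userspace sketch of that rule, with invented block numbers and without the pa_count reference counting:

#include <stdio.h>

static unsigned long long distance(unsigned long long a, unsigned long long b)
{
        return a > b ? a - b : b - a;
}

int main(void)
{
        unsigned long long goal = 40960;                        /* goal block */
        unsigned long long pa_start[] = { 8192, 36864, 65536 }; /* candidate pa_pstart values */
        unsigned long long best = pa_start[0];

        for (int i = 1; i < 3; i++)
                if (distance(goal, pa_start[i]) < distance(goal, best))
                        best = pa_start[i];

        printf("goal %llu -> use preallocation starting at %llu\n", goal, best);
        return 0;
}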
+2 -1
fs/ext4/migrate.c
··· 53 53 * credit. But below we try to not accumalate too much 54 54 * of them by restarting the journal. 55 55 */ 56 - needed = ext4_ext_calc_credits_for_insert(inode, path); 56 + needed = ext4_ext_calc_credits_for_single_extent(inode, 57 + lb->last_block - lb->first_block + 1, path); 57 58 58 59 /* 59 60 * Make sure the credit we accumalated is not really high
+2 -1
fs/ext4/resize.c
··· 773 773 774 774 if (reserved_gdb || gdb_off == 0) { 775 775 if (!EXT4_HAS_COMPAT_FEATURE(sb, 776 - EXT4_FEATURE_COMPAT_RESIZE_INODE)){ 776 + EXT4_FEATURE_COMPAT_RESIZE_INODE) 777 + || !le16_to_cpu(es->s_reserved_gdt_blocks)) { 777 778 ext4_warning(sb, __func__, 778 779 "No reserved GDT blocks, can't resize"); 779 780 return -EPERM;
+1
fs/ext4/super.c
··· 568 568 #endif 569 569 ei->i_block_alloc_info = NULL; 570 570 ei->vfs_inode.i_version = 1; 571 + ei->vfs_inode.i_data.writeback_index = 0; 571 572 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); 572 573 INIT_LIST_HEAD(&ei->i_prealloc_list); 573 574 spin_lock_init(&ei->i_prealloc_lock);
+3 -7
fs/fat/inode.c
··· 562 562 struct buffer_head *bh; 563 563 struct msdos_dir_entry *raw_entry; 564 564 loff_t i_pos; 565 - int err = 0; 565 + int err; 566 566 567 567 retry: 568 568 i_pos = MSDOS_I(inode)->i_pos; 569 569 if (inode->i_ino == MSDOS_ROOT_INO || !i_pos) 570 570 return 0; 571 571 572 - lock_super(sb); 573 572 bh = sb_bread(sb, i_pos >> sbi->dir_per_block_bits); 574 573 if (!bh) { 575 574 printk(KERN_ERR "FAT: unable to read inode block " 576 575 "for updating (i_pos %lld)\n", i_pos); 577 - err = -EIO; 578 - goto out; 576 + return -EIO; 579 577 } 580 578 spin_lock(&sbi->inode_hash_lock); 581 579 if (i_pos != MSDOS_I(inode)->i_pos) { 582 580 spin_unlock(&sbi->inode_hash_lock); 583 581 brelse(bh); 584 - unlock_super(sb); 585 582 goto retry; 586 583 } 587 584 ··· 604 607 } 605 608 spin_unlock(&sbi->inode_hash_lock); 606 609 mark_buffer_dirty(bh); 610 + err = 0; 607 611 if (wait) 608 612 err = sync_dirty_buffer(bh); 609 613 brelse(bh); 610 - out: 611 - unlock_super(sb); 612 614 return err; 613 615 } 614 616
+4 -4
fs/ioprio.c
··· 115 115 pgrp = task_pgrp(current); 116 116 else 117 117 pgrp = find_vpid(who); 118 - do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 118 + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 119 119 ret = set_task_ioprio(p, ioprio); 120 120 if (ret) 121 121 break; 122 - } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 122 + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 123 123 break; 124 124 case IOPRIO_WHO_USER: 125 125 if (!who) ··· 204 204 pgrp = task_pgrp(current); 205 205 else 206 206 pgrp = find_vpid(who); 207 - do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 207 + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 208 208 tmpio = get_task_ioprio(p); 209 209 if (tmpio < 0) 210 210 continue; ··· 212 212 ret = tmpio; 213 213 else 214 214 ret = ioprio_best(ret, tmpio); 215 - } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 215 + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 216 216 break; 217 217 case IOPRIO_WHO_USER: 218 218 if (!who)
-1
fs/jffs2/jffs2_fs_i.h
··· 12 12 #ifndef _JFFS2_FS_I 13 13 #define _JFFS2_FS_I 14 14 15 - #include <linux/version.h> 16 15 #include <linux/rbtree.h> 17 16 #include <linux/posix_acl.h> 18 17 #include <linux/mutex.h>
+2 -2
fs/proc/nommu.c
··· 52 52 } 53 53 54 54 seq_printf(m, 55 - "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n", 55 + "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", 56 56 vma->vm_start, 57 57 vma->vm_end, 58 58 flags & VM_READ ? 'r' : '-', 59 59 flags & VM_WRITE ? 'w' : '-', 60 60 flags & VM_EXEC ? 'x' : '-', 61 61 flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p', 62 - vma->vm_pgoff << PAGE_SHIFT, 62 + ((loff_t)vma->vm_pgoff) << PAGE_SHIFT, 63 63 MAJOR(dev), MINOR(dev), ino, &len); 64 64 65 65 if (file) {
+2 -2
fs/proc/task_mmu.c
··· 219 219 ino = inode->i_ino; 220 220 } 221 221 222 - seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n", 222 + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", 223 223 vma->vm_start, 224 224 vma->vm_end, 225 225 flags & VM_READ ? 'r' : '-', 226 226 flags & VM_WRITE ? 'w' : '-', 227 227 flags & VM_EXEC ? 'x' : '-', 228 228 flags & VM_MAYSHARE ? 's' : 'p', 229 - vma->vm_pgoff << PAGE_SHIFT, 229 + ((loff_t)vma->vm_pgoff) << PAGE_SHIFT, 230 230 MAJOR(dev), MINOR(dev), ino, &len); 231 231 232 232 /*
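Both seq_printf fixes above widen vm_pgoff before shifting so the file offset is not truncated on 32-bit kernels. A minimal standalone illustration, using fixed-width types in place of the kernel's unsigned long and loff_t and assuming a PAGE_SHIFT of 12:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        uint32_t pgoff = 0x00200000;    /* page index 2M, i.e. an 8 GiB file offset */
        unsigned int shift = 12;        /* PAGE_SHIFT assumed to be 12 */

        /* shifting in 32-bit arithmetic wraps: the offset prints as 0 */
        uint32_t truncated = pgoff << shift;
        /* widening first, as the (loff_t) cast above does, keeps the value */
        uint64_t widened = (uint64_t)pgoff << shift;

        printf("32-bit shift: 0x%08" PRIx32 "\n", truncated);
        printf("64-bit shift: 0x%016" PRIx64 "\n", widened);
        return 0;
}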
-1
fs/xfs/xfs_dmapi.h
··· 18 18 #ifndef __XFS_DMAPI_H__ 19 19 #define __XFS_DMAPI_H__ 20 20 21 - #include <linux/version.h> 22 21 /* Values used to define the on-disk version of dm_attrname_t. All 23 22 * on-disk attribute names start with the 8-byte string "SGI_DMI_". 24 23 *
-1
include/asm-cris/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 - header-y += arch/ 4 3 header-y += arch-v10/ 5 4 header-y += arch-v32/ 6 5
+2
include/asm-frv/io.h
··· 271 271 return __ioremap(physaddr, size, IOMAP_FULL_CACHING); 272 272 } 273 273 274 + #define ioremap_wc ioremap_nocache 275 + 274 276 extern void iounmap(void volatile __iomem *addr); 275 277 276 278 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+2
include/asm-mn10300/io.h
··· 259 259 return (void *) (offset | 0x20000000); 260 260 } 261 261 262 + #define ioremap_wc ioremap_nocache 263 + 262 264 static inline void iounmap(void *addr) 263 265 { 264 266 }
-1
include/asm-x86/xen/hypervisor.h
··· 35 35 36 36 #include <linux/types.h> 37 37 #include <linux/kernel.h> 38 - #include <linux/version.h> 39 38 40 39 #include <xen/interface/xen.h> 41 40 #include <xen/interface/version.h>
+109 -46
include/linux/ata.h
··· 46 46 ATA_MAX_SECTORS_TAPE = 65535, 47 47 48 48 ATA_ID_WORDS = 256, 49 + ATA_ID_CONFIG = 0, 50 + ATA_ID_CYLS = 1, 51 + ATA_ID_HEADS = 3, 52 + ATA_ID_SECTORS = 6, 49 53 ATA_ID_SERNO = 10, 54 + ATA_ID_BUF_SIZE = 21, 50 55 ATA_ID_FW_REV = 23, 51 56 ATA_ID_PROD = 27, 57 + ATA_ID_MAX_MULTSECT = 47, 58 + ATA_ID_DWORD_IO = 48, 59 + ATA_ID_CAPABILITY = 49, 52 60 ATA_ID_OLD_PIO_MODES = 51, 61 + ATA_ID_OLD_DMA_MODES = 52, 53 62 ATA_ID_FIELD_VALID = 53, 63 + ATA_ID_CUR_CYLS = 54, 64 + ATA_ID_CUR_HEADS = 55, 65 + ATA_ID_CUR_SECTORS = 56, 66 + ATA_ID_MULTSECT = 59, 67 + ATA_ID_LBA_CAPACITY = 60, 68 + ATA_ID_SWDMA_MODES = 62, 54 69 ATA_ID_MWDMA_MODES = 63, 55 70 ATA_ID_PIO_MODES = 64, 56 71 ATA_ID_EIDE_DMA_MIN = 65, 72 + ATA_ID_EIDE_DMA_TIME = 66, 57 73 ATA_ID_EIDE_PIO = 67, 58 74 ATA_ID_EIDE_PIO_IORDY = 68, 59 - ATA_ID_UDMA_MODES = 88, 75 + ATA_ID_QUEUE_DEPTH = 75, 60 76 ATA_ID_MAJOR_VER = 80, 77 + ATA_ID_COMMAND_SET_1 = 82, 78 + ATA_ID_COMMAND_SET_2 = 83, 79 + ATA_ID_CFSSE = 84, 80 + ATA_ID_CFS_ENABLE_1 = 85, 81 + ATA_ID_CFS_ENABLE_2 = 86, 82 + ATA_ID_CSF_DEFAULT = 87, 83 + ATA_ID_UDMA_MODES = 88, 84 + ATA_ID_HW_CONFIG = 93, 85 + ATA_ID_SPG = 98, 86 + ATA_ID_LBA_CAPACITY_2 = 100, 87 + ATA_ID_LAST_LUN = 126, 88 + ATA_ID_DLF = 128, 89 + ATA_ID_CSFO = 129, 90 + ATA_ID_CFA_POWER = 160, 61 91 ATA_ID_PIO4 = (1 << 1), 62 92 63 93 ATA_ID_SERNO_LEN = 20, ··· 153 123 ATA_BUSY = (1 << 7), /* BSY status bit */ 154 124 ATA_DRDY = (1 << 6), /* device ready */ 155 125 ATA_DF = (1 << 5), /* device fault */ 126 + ATA_DSC = (1 << 4), /* drive seek complete */ 156 127 ATA_DRQ = (1 << 3), /* data request i/o */ 128 + ATA_CORR = (1 << 2), /* corrected data error */ 129 + ATA_IDX = (1 << 1), /* index */ 157 130 ATA_ERR = (1 << 0), /* have an error */ 158 131 ATA_SRST = (1 << 2), /* software reset */ 159 132 ATA_ICRC = (1 << 7), /* interface CRC error */ 133 + ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ 160 134 ATA_UNC = (1 << 6), /* uncorrectable media error */ 135 + ATA_MC = (1 << 5), /* media changed */ 161 136 ATA_IDNF = (1 << 4), /* ID not found */ 137 + ATA_MCR = (1 << 3), /* media change requested */ 162 138 ATA_ABORTED = (1 << 2), /* command aborted */ 139 + ATA_TRK0NF = (1 << 1), /* track 0 not found */ 140 + ATA_AMNF = (1 << 0), /* address mark not found */ 141 + ATAPI_LFS = 0xF0, /* last failed sense */ 142 + ATAPI_EOM = ATA_TRK0NF, /* end of media */ 143 + ATAPI_ILI = ATA_AMNF, /* illegal length indication */ 144 + ATAPI_IO = (1 << 1), 145 + ATAPI_COD = (1 << 0), 163 146 164 147 /* ATA command block registers */ 165 148 ATA_REG_DATA = 0x00, ··· 235 192 ATA_CMD_PMP_WRITE = 0xE8, 236 193 ATA_CMD_CONF_OVERLAY = 0xB1, 237 194 ATA_CMD_SEC_FREEZE_LOCK = 0xF5, 195 + ATA_CMD_SMART = 0xB0, 196 + ATA_CMD_MEDIA_LOCK = 0xDE, 197 + ATA_CMD_MEDIA_UNLOCK = 0xDF, 198 + /* marked obsolete in the ATA/ATAPI-7 spec */ 199 + ATA_CMD_RESTORE = 0x10, 200 + /* EXABYTE specific */ 201 + ATA_EXABYTE_ENABLE_NEST = 0xF0, 238 202 239 203 /* READ_LOG_EXT pages */ 240 204 ATA_LOG_SATA_NCQ = 0x10, ··· 282 232 SETFEATURES_WC_ON = 0x02, /* Enable write cache */ 283 233 SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ 284 234 235 + /* Enable/Disable Automatic Acoustic Management */ 236 + SETFEATURES_AAM_ON = 0x42, 237 + SETFEATURES_AAM_OFF = 0xC2, 238 + 285 239 SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ 286 240 287 241 SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ ··· 307 253 ATA_DCO_FREEZE_LOCK = 0xC1, 308 254 ATA_DCO_IDENTIFY = 0xC2, 309 255 ATA_DCO_SET = 0xC3, 256 + 257 + /* feature values for SMART */ 
258 + ATA_SMART_ENABLE = 0xD8, 259 + ATA_SMART_READ_VALUES = 0xD0, 260 + ATA_SMART_READ_THRESHOLDS = 0xD1, 261 + 262 + /* password used in LBA Mid / LBA High for executing SMART commands */ 263 + ATA_SMART_LBAM_PASS = 0x4F, 264 + ATA_SMART_LBAH_PASS = 0xC2, 310 265 311 266 /* ATAPI stuff */ 312 267 ATAPI_PKT_DMA = (1 << 0), ··· 501 438 /* 502 439 * id tests 503 440 */ 504 - #define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0) 505 - #define ata_id_has_lba(id) ((id)[49] & (1 << 9)) 506 - #define ata_id_has_dma(id) ((id)[49] & (1 << 8)) 441 + #define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) 442 + #define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) 443 + #define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) 507 444 #define ata_id_has_ncq(id) ((id)[76] & (1 << 8)) 508 - #define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1) 509 - #define ata_id_removeable(id) ((id)[0] & (1 << 7)) 445 + #define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) 446 + #define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) 510 447 #define ata_id_has_atapi_AN(id) \ 511 448 ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ 512 449 ((id)[78] & (1 << 5)) ) 513 - #define ata_id_iordy_disable(id) ((id)[49] & (1 << 10)) 514 - #define ata_id_has_iordy(id) ((id)[49] & (1 << 11)) 450 + #define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) 451 + #define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) 515 452 #define ata_id_u32(id,n) \ 516 453 (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) 517 454 #define ata_id_u64(id,n) \ ··· 520 457 ((u64) (id)[(n) + 1] << 16) | \ 521 458 ((u64) (id)[(n) + 0]) ) 522 459 523 - #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) 460 + #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) 524 461 525 462 static inline bool ata_id_has_hipm(const u16 *id) 526 463 { ··· 545 482 546 483 static inline int ata_id_has_fua(const u16 *id) 547 484 { 548 - if ((id[84] & 0xC000) != 0x4000) 485 + if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) 549 486 return 0; 550 - return id[84] & (1 << 6); 487 + return id[ATA_ID_CFSSE] & (1 << 6); 551 488 } 552 489 553 490 static inline int ata_id_has_flush(const u16 *id) 554 491 { 555 - if ((id[83] & 0xC000) != 0x4000) 492 + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) 556 493 return 0; 557 - return id[83] & (1 << 12); 494 + return id[ATA_ID_COMMAND_SET_2] & (1 << 12); 558 495 } 559 496 560 497 static inline int ata_id_has_flush_ext(const u16 *id) 561 498 { 562 - if ((id[83] & 0xC000) != 0x4000) 499 + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) 563 500 return 0; 564 - return id[83] & (1 << 13); 501 + return id[ATA_ID_COMMAND_SET_2] & (1 << 13); 565 502 } 566 503 567 504 static inline int ata_id_has_lba48(const u16 *id) 568 505 { 569 - if ((id[83] & 0xC000) != 0x4000) 506 + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) 570 507 return 0; 571 - if (!ata_id_u64(id, 100)) 508 + if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) 572 509 return 0; 573 - return id[83] & (1 << 10); 510 + return id[ATA_ID_COMMAND_SET_2] & (1 << 10); 574 511 } 575 512 576 513 static inline int ata_id_hpa_enabled(const u16 *id) 577 514 { 578 515 /* Yes children, word 83 valid bits cover word 82 data */ 579 - if ((id[83] & 0xC000) != 0x4000) 516 + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) 580 517 return 0; 581 518 /* And 87 covers 85-87 */ 582 - if ((id[87] & 0xC000) != 0x4000) 519 + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) 583 520 return 0; 584 521 /* Check 
command sets enabled as well as supported */ 585 - if ((id[85] & ( 1 << 10)) == 0) 522 + if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) 586 523 return 0; 587 - return id[82] & (1 << 10); 524 + return id[ATA_ID_COMMAND_SET_1] & (1 << 10); 588 525 } 589 526 590 527 static inline int ata_id_has_wcache(const u16 *id) 591 528 { 592 529 /* Yes children, word 83 valid bits cover word 82 data */ 593 - if ((id[83] & 0xC000) != 0x4000) 530 + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) 594 531 return 0; 595 - return id[82] & (1 << 5); 532 + return id[ATA_ID_COMMAND_SET_1] & (1 << 5); 596 533 } 597 534 598 535 static inline int ata_id_has_pm(const u16 *id) 599 536 { 600 - if ((id[83] & 0xC000) != 0x4000) 537 + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) 601 538 return 0; 602 - return id[82] & (1 << 3); 539 + return id[ATA_ID_COMMAND_SET_1] & (1 << 3); 603 540 } 604 541 605 542 static inline int ata_id_rahead_enabled(const u16 *id) 606 543 { 607 - if ((id[87] & 0xC000) != 0x4000) 544 + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) 608 545 return 0; 609 - return id[85] & (1 << 6); 546 + return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); 610 547 } 611 548 612 549 static inline int ata_id_wcache_enabled(const u16 *id) 613 550 { 614 - if ((id[87] & 0xC000) != 0x4000) 551 + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) 615 552 return 0; 616 - return id[85] & (1 << 5); 553 + return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); 617 554 } 618 555 619 556 /** ··· 644 581 645 582 static inline int ata_id_is_sata(const u16 *id) 646 583 { 647 - return ata_id_major_version(id) >= 5 && id[93] == 0; 584 + return ata_id_major_version(id) >= 5 && id[ATA_ID_HW_CONFIG] == 0; 648 585 } 649 586 650 587 static inline int ata_id_has_tpm(const u16 *id) ··· 662 599 /* ATA 8 reuses this flag for "trusted" computing */ 663 600 if (ata_id_major_version(id) > 7) 664 601 return 0; 665 - if (id[48] & (1 << 0)) 602 + if (id[ATA_ID_DWORD_IO] & (1 << 0)) 666 603 return 1; 667 604 return 0; 668 605 } ··· 671 608 { 672 609 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 673 610 has not been issued to the device then the values of 674 - id[54] to id[56] are vendor specific. */ 675 - return (id[53] & 0x01) && /* Current translation valid */ 676 - id[54] && /* cylinders in current translation */ 677 - id[55] && /* heads in current translation */ 678 - id[55] <= 16 && 679 - id[56]; /* sectors in current translation */ 611 + id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. 
*/ 612 + return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ 613 + id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ 614 + id[ATA_ID_CUR_HEADS] && /* heads in current translation */ 615 + id[ATA_ID_CUR_HEADS] <= 16 && 616 + id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ 680 617 } 681 618 682 619 static inline int ata_id_is_cfa(const u16 *id) 683 620 { 684 - u16 v = id[0]; 685 - if (v == 0x848A) /* Standard CF */ 621 + if (id[ATA_ID_CONFIG] == 0x848A) /* Standard CF */ 686 622 return 1; 687 623 /* Could be CF hiding as standard ATA */ 688 - if (ata_id_major_version(id) >= 3 && id[82] != 0xFFFF && 689 - (id[82] & ( 1 << 2))) 624 + if (ata_id_major_version(id) >= 3 && 625 + id[ATA_ID_COMMAND_SET_1] != 0xFFFF && 626 + (id[ATA_ID_COMMAND_SET_1] & (1 << 2))) 690 627 return 1; 691 628 return 0; 692 629 } ··· 695 632 { 696 633 if (ata_id_is_sata(dev_id)) 697 634 return 0; /* SATA */ 698 - if ((dev_id[93] & 0xE000) == 0x6000) 635 + if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) 699 636 return 0; /* 80 wire */ 700 637 return 1; 701 638 } 702 639 703 640 static inline int ata_drive_40wire_relaxed(const u16 *dev_id) 704 641 { 705 - if ((dev_id[93] & 0x2000) == 0x2000) 642 + if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) 706 643 return 0; /* 80 wire */ 707 644 return 1; 708 645 } 709 646 710 647 static inline int atapi_cdb_len(const u16 *dev_id) 711 648 { 712 - u16 tmp = dev_id[0] & 0x3; 649 + u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; 713 650 switch (tmp) { 714 651 case 0: return 12; 715 652 case 1: return 16; ··· 719 656 720 657 static inline int atapi_command_packet_set(const u16 *dev_id) 721 658 { 722 - return (dev_id[0] >> 8) & 0x1f; 659 + return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; 723 660 } 724 661 725 662 static inline int atapi_id_dmadir(const u16 *dev_id)
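Most of the helpers above follow the same ATA convention: a feature word is only meaningful when bit 14 is set and bit 15 is clear, i.e. (word & 0xC000) == 0x4000. A small userspace sketch of that check, reusing the ATA_ID_COMMAND_SET_2 index and the FLUSH CACHE bit from the hunk (the sample id[] contents are invented):

#include <stdio.h>
#include <stdint.h>

#define ATA_ID_COMMAND_SET_2    83

/* a word of feature bits is valid only if bit 14 is set and bit 15 is clear */
static int id_word_valid(uint16_t w)
{
        return (w & 0xC000) == 0x4000;
}

/* mirrors ata_id_has_flush() above: word 83, bit 12 */
static int id_has_flush(const uint16_t *id)
{
        if (!id_word_valid(id[ATA_ID_COMMAND_SET_2]))
                return 0;
        return !!(id[ATA_ID_COMMAND_SET_2] & (1 << 12));
}

int main(void)
{
        uint16_t id[256] = { 0 };

        id[ATA_ID_COMMAND_SET_2] = 0x4000 | (1 << 12);  /* valid word, FLUSH CACHE set */
        printf("flush supported: %d\n", id_has_flush(id));

        id[ATA_ID_COMMAND_SET_2] = 0xFFFF;              /* garbage word: ignore its bits */
        printf("flush supported: %d\n", id_has_flush(id));
        return 0;
}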
+3 -2
include/linux/device.h
··· 358 358 359 359 struct kobject kobj; 360 360 char bus_id[BUS_ID_SIZE]; /* position on parent bus */ 361 + const char *init_name; /* initial name of the device */ 361 362 struct device_type *type; 362 363 unsigned uevent_suppress:1; 363 364 ··· 407 406 /* Get the wakeup routines, which depend on struct device */ 408 407 #include <linux/pm_wakeup.h> 409 408 410 - static inline const char *dev_name(struct device *dev) 409 + static inline const char *dev_name(const struct device *dev) 411 410 { 412 411 /* will be changed into kobject_name(&dev->kobj) in the near future */ 413 412 return dev->bus_id; ··· 519 518 extern void sysdev_shutdown(void); 520 519 521 520 /* debugging and troubleshooting/diagnostic helpers. */ 522 - extern const char *dev_driver_string(struct device *dev); 521 + extern const char *dev_driver_string(const struct device *dev); 523 522 #define dev_printk(level, dev, format, arg...) \ 524 523 printk(level "%s %s: " format , dev_driver_string(dev) , \ 525 524 dev_name(dev) , ## arg)
+21
include/linux/exportfs.h
··· 35 35 FILEID_INO32_GEN_PARENT = 2, 36 36 37 37 /* 38 + * 64 bit object ID, 64 bit root object ID, 39 + * 32 bit generation number. 40 + */ 41 + FILEID_BTRFS_WITHOUT_PARENT = 0x4d, 42 + 43 + /* 44 + * 64 bit object ID, 64 bit root object ID, 45 + * 32 bit generation number, 46 + * 64 bit parent object ID, 32 bit parent generation. 47 + */ 48 + FILEID_BTRFS_WITH_PARENT = 0x4e, 49 + 50 + /* 51 + * 64 bit object ID, 64 bit root object ID, 52 + * 32 bit generation number, 53 + * 64 bit parent object ID, 32 bit parent generation, 54 + * 64 bit parent root object ID. 55 + */ 56 + FILEID_BTRFS_WITH_PARENT_ROOT = 0x4f, 57 + 58 + /* 38 59 * 32 bit block number, 16 bit partition reference, 39 60 * 16 bit unused, 32 bit generation number. 40 61 */
+3
include/linux/fb.h
··· 976 976 977 977 /* drivers/video/fb_defio.c */ 978 978 extern void fb_deferred_io_init(struct fb_info *info); 979 + extern void fb_deferred_io_open(struct fb_info *info, 980 + struct inode *inode, 981 + struct file *file); 979 982 extern void fb_deferred_io_cleanup(struct fb_info *info); 980 983 extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, 981 984 int datasync);
-1
include/linux/fs_uart_pd.h
··· 12 12 #ifndef FS_UART_PD_H 13 13 #define FS_UART_PD_H 14 14 15 - #include <linux/version.h> 16 15 #include <asm/types.h> 17 16 18 17 enum fs_uart_id {
+1
include/linux/if_tun.h
··· 45 45 #define TUNGETFEATURES _IOR('T', 207, unsigned int) 46 46 #define TUNSETOFFLOAD _IOW('T', 208, unsigned int) 47 47 #define TUNSETTXFILTER _IOW('T', 209, unsigned int) 48 + #define TUNGETIFF _IOR('T', 210, unsigned int) 48 49 49 50 /* TUNSETIFF ifr flags */ 50 51 #define IFF_TUN 0x0001
+24
include/linux/libata.h
··· 163 163 ATA_DEV_NONE = 9, /* no device */ 164 164 165 165 /* struct ata_link flags */ 166 + ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ 166 167 ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ 167 168 ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ 168 169 ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ ··· 647 646 648 647 unsigned int flags; /* ATA_LFLAG_xxx */ 649 648 649 + u32 saved_scontrol; /* SControl on probe */ 650 650 unsigned int hw_sata_spd_limit; 651 651 unsigned int sata_spd_limit; 652 652 unsigned int sata_spd; /* current SATA PHY speed */ ··· 1429 1427 return from_jiffies + msecs_to_jiffies(timeout_msecs); 1430 1428 } 1431 1429 1430 + /* Don't open code these in drivers as there are traps. Firstly the range may 1431 + change in future hardware and specs, secondly 0xFF means 'no DMA' but is 1432 + > UDMA_0. Dyma ddreigiau */ 1433 + 1434 + static inline int ata_using_mwdma(struct ata_device *adev) 1435 + { 1436 + if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) 1437 + return 1; 1438 + return 0; 1439 + } 1440 + 1441 + static inline int ata_using_udma(struct ata_device *adev) 1442 + { 1443 + if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) 1444 + return 1; 1445 + return 0; 1446 + } 1447 + 1448 + static inline int ata_dma_enabled(struct ata_device *adev) 1449 + { 1450 + return (adev->dma_mode == 0xFF ? 0 : 1); 1451 + } 1432 1452 1433 1453 /************************************************************************** 1434 1454 * PMP - drivers/ata/libata-pmp.c
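The comment above ("0xFF means 'no DMA' but is > UDMA_0") is the whole story behind these helpers. The sketch below shows the trap with placeholder XFER_* values rather than the real ata.h definitions:

#include <stdio.h>

/* placeholder values for illustration only, not the real ata.h XFER_* defines */
enum { XFER_UDMA_0 = 0x40, XFER_UDMA_7 = 0x47, NO_DMA = 0xFF };

static int using_udma(unsigned char dma_mode)
{
        return dma_mode >= XFER_UDMA_0 && dma_mode <= XFER_UDMA_7;
}

int main(void)
{
        unsigned char mode = NO_DMA;    /* no DMA configured */

        printf("naive '>= UDMA_0' test: %d\n", mode >= XFER_UDMA_0);    /* wrongly 1 */
        printf("range check           : %d\n", using_udma(mode));       /* correctly 0 */
        return 0;
}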
+11
include/linux/pci-acpi.h
··· 57 57 { 58 58 return __pci_osc_support_set(flags, PCI_EXPRESS_ROOT_HID_STRING); 59 59 } 60 + static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 61 + { 62 + /* Find root host bridge */ 63 + while (pdev->bus->self) 64 + pdev = pdev->bus->self; 65 + 66 + return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), 67 + pdev->bus->number); 68 + } 60 69 #else 61 70 #if !defined(AE_ERROR) 62 71 typedef u32 acpi_status; ··· 75 66 {return AE_ERROR;} 76 67 static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;} 77 68 static inline acpi_status pcie_osc_support_set(u32 flags) {return AE_ERROR;} 69 + static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 70 + { return NULL; } 78 71 #endif 79 72 80 73 #endif /* _PCI_ACPI_H_ */
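acpi_find_root_bridge_handle() above simply walks pdev->bus->self upstream until it reaches the root bus. A toy model of that walk (the structures are invented; nothing here is the real PCI or ACPI API):

#include <stdio.h>

struct toy_bus;
struct toy_dev { struct toy_bus *bus; const char *name; };
struct toy_bus { struct toy_dev *self; };       /* upstream bridge, NULL at the root */

static struct toy_dev *find_root(struct toy_dev *dev)
{
        while (dev->bus->self)
                dev = dev->bus->self;
        return dev;
}

int main(void)
{
        struct toy_bus root_bus = { NULL };
        struct toy_dev bridge = { &root_bus, "root bridge" };
        struct toy_bus bus1 = { &bridge };
        struct toy_dev leaf = { &bus1, "leaf device" };

        printf("%s\n", find_root(&leaf)->name); /* prints "root bridge" */
        return 0;
}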
+3
include/linux/pci_ids.h
··· 2428 2428 #define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a 2429 2429 #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 2430 2430 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 2431 + #define PCI_DEVICE_ID_INTEL_PCH_0 0x3b10 2432 + #define PCI_DEVICE_ID_INTEL_PCH_1 0x3b11 2433 + #define PCI_DEVICE_ID_INTEL_PCH_2 0x3b30 2431 2434 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2432 2435 #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 2433 2436 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+9
include/linux/pid.h
··· 161 161 } \ 162 162 } while (0) 163 163 164 + #define do_each_pid_thread(pid, type, task) \ 165 + do_each_pid_task(pid, type, task) { \ 166 + struct task_struct *tg___ = task; \ 167 + do { 168 + 169 + #define while_each_pid_thread(pid, type, task) \ 170 + } while_each_thread(tg___, task); \ 171 + task = tg___; \ 172 + } while_each_pid_task(pid, type, task) 164 173 #endif /* _LINUX_PID_H */
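The new macros are an open/close pair: do_each_pid_thread() opens loops and leaves braces dangling, while_each_pid_thread() closes them, with the thread loop layered inside the existing pid-task pair. Below is a compilable userspace sketch of the same pattern with invented toy_task/toy_group types; the real macros additionally save and restore the group leader in tg___.

#include <stdio.h>

struct toy_task { const char *name; };
struct toy_group { struct toy_task *tasks; int ntasks; };

/* base pair: every group in an array */
#define do_each_group(g, groups, n)                             \
        for (int _gi = 0; _gi < (n); _gi++) {                   \
                (g) = &(groups)[_gi];

#define while_each_group(g)                                     \
        }

/* layered pair: every task of every group, nested inside the base pair */
#define do_each_group_task(t, g, groups, n)                     \
        do_each_group(g, groups, n) {                           \
                for (int _ti = 0; _ti < (g)->ntasks; _ti++) {   \
                        (t) = &(g)->tasks[_ti];

#define while_each_group_task(t, g)                             \
                }                                               \
        } while_each_group(g)

int main(void)
{
        struct toy_task a[] = { { "a0" }, { "a1" } }, b[] = { { "b0" } };
        struct toy_group groups[] = { { a, 2 }, { b, 1 } };
        struct toy_group *g;
        struct toy_task *t;

        do_each_group_task(t, g, groups, 2) {
                printf("%s\n", t->name);
        } while_each_group_task(t, g);

        return 0;
}

The callers changed in fs/ioprio.c and kernel/sys.c use the pair exactly this way, so iterating threads instead of thread-group leaders needs no change at the call sites beyond the macro name.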
+1 -1
include/linux/rmap.h
··· 102 102 * Called from mm/filemap_xip.c to unmap empty zero page 103 103 */ 104 104 pte_t *page_check_address(struct page *, struct mm_struct *, 105 - unsigned long, spinlock_t **); 105 + unsigned long, spinlock_t **, int); 106 106 107 107 /* 108 108 * Used by swapoff to help locate where page is expected in vma.
+4
include/linux/skbuff.h
··· 1452 1452 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, 1453 1453 int hlen, 1454 1454 struct iovec *iov); 1455 + extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, 1456 + int offset, 1457 + struct iovec *from, 1458 + int len); 1455 1459 extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 1456 1460 extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, 1457 1461 unsigned int flags);
+4 -1
include/linux/tick.h
··· 74 74 extern int tick_init_highres(void); 75 75 extern int tick_program_event(ktime_t expires, int force); 76 76 extern void tick_setup_sched_timer(void); 77 + # endif 78 + 79 + # if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS 77 80 extern void tick_cancel_sched_timer(int cpu); 78 81 # else 79 82 static inline void tick_cancel_sched_timer(int cpu) { } 80 - # endif /* HIGHRES */ 83 + # endif 81 84 82 85 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 83 86 extern struct tick_device *tick_get_broadcast_device(void);
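A minimal sketch of what the reworked guard buys callers: the real tick_cancel_sched_timer() is built when either option is set, everyone else gets an empty stub (the matching definition change is in kernel/time/tick-sched.c further down). The CONFIG_* define here is only a local toggle for illustration.

#include <stdio.h>

#define CONFIG_NO_HZ 1                  /* comment out to build the stub instead */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
static void tick_cancel_sched_timer(int cpu)
{
        printf("cancelling sched timer on cpu %d\n", cpu);
}
#else
static inline void tick_cancel_sched_timer(int cpu) { (void)cpu; }
#endif

int main(void)
{
        tick_cancel_sched_timer(0);
        return 0;
}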
+3
include/linux/usb.h
··· 110 110 * @sysfs_files_created: sysfs attributes exist 111 111 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup 112 112 * capability during autosuspend. 113 + * @needs_altsetting0: flag set when a set-interface request for altsetting 0 114 + * has been deferred. 113 115 * @needs_binding: flag set when the driver should be re-probed or unbound 114 116 * following a reset or suspend operation it doesn't support. 115 117 * @dev: driver model's view of this device ··· 164 162 unsigned is_active:1; /* the interface is not suspended */ 165 163 unsigned sysfs_files_created:1; /* the sysfs attributes exist */ 166 164 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ 165 + unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ 167 166 unsigned needs_binding:1; /* needs delayed unbind/rebind */ 168 167 169 168 struct device dev; /* interface specific device info */
+2 -1
include/net/addrconf.h
··· 80 80 struct net_device *dev, 81 81 int strict); 82 82 83 - extern int ipv6_dev_get_saddr(struct net_device *dev, 83 + extern int ipv6_dev_get_saddr(struct net *net, 84 + struct net_device *dev, 84 85 const struct in6_addr *daddr, 85 86 unsigned int srcprefs, 86 87 struct in6_addr *saddr);
+1
include/net/ip6_route.h
··· 107 107 { 108 108 struct sk_buff *skb; 109 109 struct netlink_callback *cb; 110 + struct net *net; 110 111 }; 111 112 112 113 extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+3 -8
include/net/mac80211.h
··· 708 708 * rely on the host system for such buffering. This option is used 709 709 * to configure the IEEE 802.11 upper layer to buffer broadcast and 710 710 * multicast frames when there are power saving stations so that 711 - * the driver can fetch them with ieee80211_get_buffered_bc(). Note 712 - * that not setting this flag works properly only when the 713 - * %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because 714 - * otherwise the stack will not know when the DTIM beacon was sent. 711 + * the driver can fetch them with ieee80211_get_buffered_bc(). 715 712 * 716 713 * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE: 717 714 * Hardware is not capable of short slot operation on the 2.4 GHz band. ··· 1096 1099 * See the section "Frame filtering" for more information. 1097 1100 * This callback must be implemented and atomic. 1098 1101 * 1099 - * @set_tim: Set TIM bit. If the hardware/firmware takes care of beacon 1100 - * generation (that is, %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is set) 1101 - * mac80211 calls this function when a TIM bit must be set or cleared 1102 - * for a given AID. Must be atomic. 1102 + * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit 1103 + * must be set or cleared for a given AID. Must be atomic. 1103 1104 * 1104 1105 * @set_key: See the section "Hardware crypto acceleration" 1105 1106 * This callback can sleep, and is only called between add_interface
+1
include/net/pkt_sched.h
··· 78 78 79 79 extern int register_qdisc(struct Qdisc_ops *qops); 80 80 extern int unregister_qdisc(struct Qdisc_ops *qops); 81 + extern void qdisc_list_del(struct Qdisc *q); 81 82 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); 82 83 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); 83 84 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+6 -1
include/net/sch_generic.h
··· 27 27 { 28 28 __QDISC_STATE_RUNNING, 29 29 __QDISC_STATE_SCHED, 30 + __QDISC_STATE_DEACTIVATED, 30 31 }; 31 32 32 33 struct qdisc_size_table { ··· 61 60 struct gnet_stats_basic bstats; 62 61 struct gnet_stats_queue qstats; 63 62 struct gnet_stats_rate_est rate_est; 64 - struct rcu_head q_rcu; 65 63 int (*reshape_fail)(struct sk_buff *skb, 66 64 struct Qdisc *q); 67 65 ··· 191 191 static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc) 192 192 { 193 193 return qdisc->dev_queue->qdisc; 194 + } 195 + 196 + static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc) 197 + { 198 + return qdisc->dev_queue->qdisc_sleeping; 194 199 } 195 200 196 201 /* The qdisc root lock is a mechanism by which to top level
+2 -1
include/scsi/scsi_device.h
··· 6 6 #include <linux/spinlock.h> 7 7 #include <linux/workqueue.h> 8 8 #include <linux/blkdev.h> 9 + #include <scsi/scsi.h> 9 10 #include <asm/atomic.h> 10 11 11 12 struct request_queue; ··· 427 426 428 427 static inline int scsi_device_protection(struct scsi_device *sdev) 429 428 { 430 - return sdev->inquiry[5] & (1<<0); 429 + return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0); 431 430 } 432 431 433 432 #define MODULE_ALIAS_SCSI_DEVICE(type) \
-1
kernel/nsproxy.c
··· 14 14 */ 15 15 16 16 #include <linux/module.h> 17 - #include <linux/version.h> 18 17 #include <linux/nsproxy.h> 19 18 #include <linux/init_task.h> 20 19 #include <linux/mnt_namespace.h>
-1
kernel/power/swap.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/file.h> 16 16 #include <linux/utsname.h> 17 - #include <linux/version.h> 18 17 #include <linux/delay.h> 19 18 #include <linux/bitops.h> 20 19 #include <linux/genhd.h>
+1
kernel/rcupdate.c
··· 77 77 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), 78 78 * and may be nested. 79 79 */ 80 + void synchronize_rcu(void); /* Makes kernel-doc tools happy */ 80 81 synchronize_rcu_xxx(synchronize_rcu, call_rcu) 81 82 EXPORT_SYMBOL_GPL(synchronize_rcu); 82 83
+1 -1
kernel/sched_features.h
··· 8 8 SCHED_FEAT(HRTICK, 1) 9 9 SCHED_FEAT(DOUBLE_TICK, 0) 10 10 SCHED_FEAT(ASYM_GRAN, 1) 11 - SCHED_FEAT(LB_BIAS, 0) 11 + SCHED_FEAT(LB_BIAS, 1) 12 12 SCHED_FEAT(LB_WAKEUP_UPDATE, 1) 13 13 SCHED_FEAT(ASYM_EFF_LOAD, 1)
+3 -2
kernel/signal.c
··· 1338 1338 struct siginfo info; 1339 1339 unsigned long flags; 1340 1340 struct sighand_struct *psig; 1341 + int ret = sig; 1341 1342 1342 1343 BUG_ON(sig == -1); 1343 1344 ··· 1403 1402 * is implementation-defined: we do (if you don't want 1404 1403 * it, just use SIG_IGN instead). 1405 1404 */ 1406 - tsk->exit_signal = -1; 1405 + ret = tsk->exit_signal = -1; 1407 1406 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1408 1407 sig = -1; 1409 1408 } ··· 1412 1411 __wake_up_parent(tsk, tsk->parent); 1413 1412 spin_unlock_irqrestore(&psig->siglock, flags); 1414 1413 1415 - return sig; 1414 + return ret; 1416 1415 } 1417 1416 1418 1417 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
+4 -4
kernel/sys.c
··· 169 169 pgrp = find_vpid(who); 170 170 else 171 171 pgrp = task_pgrp(current); 172 - do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 172 + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 173 173 error = set_one_prio(p, niceval, error); 174 - } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 174 + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 175 175 break; 176 176 case PRIO_USER: 177 177 user = current->user; ··· 229 229 pgrp = find_vpid(who); 230 230 else 231 231 pgrp = task_pgrp(current); 232 - do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 232 + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 233 233 niceval = 20 - task_nice(p); 234 234 if (niceval > retval) 235 235 retval = niceval; 236 - } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 236 + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 237 237 break; 238 238 case PRIO_USER: 239 239 user = current->user;
+5 -1
kernel/time/tick-sched.c
··· 643 643 ts->nohz_mode = NOHZ_MODE_HIGHRES; 644 644 #endif 645 645 } 646 + #endif /* HIGH_RES_TIMERS */ 646 647 648 + #if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS 647 649 void tick_cancel_sched_timer(int cpu) 648 650 { 649 651 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 650 652 653 + # ifdef CONFIG_HIGH_RES_TIMERS 651 654 if (ts->sched_timer.base) 652 655 hrtimer_cancel(&ts->sched_timer); 656 + # endif 653 657 654 658 ts->nohz_mode = NOHZ_MODE_INACTIVE; 655 659 } 656 - #endif /* HIGH_RES_TIMERS */ 660 + #endif 657 661 658 662 /** 659 663 * Async notification about clocksource changes
-1
kernel/user_namespace.c
··· 6 6 */ 7 7 8 8 #include <linux/module.h> 9 - #include <linux/version.h> 10 9 #include <linux/nsproxy.h> 11 10 #include <linux/slab.h> 12 11 #include <linux/user_namespace.h>
-1
kernel/utsname.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/uts.h> 14 14 #include <linux/utsname.h> 15 - #include <linux/version.h> 16 15 #include <linux/err.h> 17 16 #include <linux/slab.h> 18 17
-1
kernel/utsname_sysctl.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/uts.h> 14 14 #include <linux/utsname.h> 15 - #include <linux/version.h> 16 15 #include <linux/sysctl.h> 17 16 18 17 static void *get_uts(ctl_table *table, int write)
+1 -2
lib/kobject.c
··· 223 223 return -ENOMEM; 224 224 225 225 /* ewww... some of these buggers have '/' in the name ... */ 226 - s = strchr(kobj->name, '/'); 227 - if (s) 226 + while ((s = strchr(kobj->name, '/'))) 228 227 s[0] = '!'; 229 228 230 229 kfree(old_name);
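The kobject rename fix above turns a single strchr() into a loop so every '/' in the name is rewritten, not just the first. Standalone illustration:

#include <stdio.h>
#include <string.h>

/* Replace every '/' with '!', as the patched loop does; the old code
 * only rewrote the first occurrence. */
static void sanitize(char *name)
{
        char *s;

        while ((s = strchr(name, '/')))
                s[0] = '!';
}

int main(void)
{
        char name[] = "usb/1-1/bad/name";

        sanitize(name);
        printf("%s\n", name);   /* usb!1-1!bad!name */
        return 0;
}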
+29 -6
mm/bootmem.c
··· 405 405 } 406 406 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ 407 407 408 + static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, 409 + unsigned long step) 410 + { 411 + unsigned long base = bdata->node_min_pfn; 412 + 413 + /* 414 + * Align the index with respect to the node start so that the 415 + * combination of both satisfies the requested alignment. 416 + */ 417 + 418 + return ALIGN(base + idx, step) - base; 419 + } 420 + 421 + static unsigned long align_off(struct bootmem_data *bdata, unsigned long off, 422 + unsigned long align) 423 + { 424 + unsigned long base = PFN_PHYS(bdata->node_min_pfn); 425 + 426 + /* Same as align_idx for byte offsets */ 427 + 428 + return ALIGN(base + off, align) - base; 429 + } 430 + 408 431 static void * __init alloc_bootmem_core(struct bootmem_data *bdata, 409 432 unsigned long size, unsigned long align, 410 433 unsigned long goal, unsigned long limit) ··· 464 441 else 465 442 start = ALIGN(min, step); 466 443 467 - sidx = start - bdata->node_min_pfn;; 444 + sidx = start - bdata->node_min_pfn; 468 445 midx = max - bdata->node_min_pfn; 469 446 470 447 if (bdata->hint_idx > sidx) { ··· 473 450 * catch the fallback below. 474 451 */ 475 452 fallback = sidx + 1; 476 - sidx = ALIGN(bdata->hint_idx, step); 453 + sidx = align_idx(bdata, bdata->hint_idx, step); 477 454 } 478 455 479 456 while (1) { ··· 482 459 unsigned long eidx, i, start_off, end_off; 483 460 find_block: 484 461 sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx); 485 - sidx = ALIGN(sidx, step); 462 + sidx = align_idx(bdata, sidx, step); 486 463 eidx = sidx + PFN_UP(size); 487 464 488 465 if (sidx >= midx || eidx > midx) ··· 490 467 491 468 for (i = sidx; i < eidx; i++) 492 469 if (test_bit(i, bdata->node_bootmem_map)) { 493 - sidx = ALIGN(i, step); 470 + sidx = align_idx(bdata, i, step); 494 471 if (sidx == i) 495 472 sidx += step; 496 473 goto find_block; ··· 498 475 499 476 if (bdata->last_end_off & (PAGE_SIZE - 1) && 500 477 PFN_DOWN(bdata->last_end_off) + 1 == sidx) 501 - start_off = ALIGN(bdata->last_end_off, align); 478 + start_off = align_off(bdata, bdata->last_end_off, align); 502 479 else 503 480 start_off = PFN_PHYS(sidx); 504 481 ··· 522 499 } 523 500 524 501 if (fallback) { 525 - sidx = ALIGN(fallback - 1, step); 502 + sidx = align_idx(bdata, fallback - 1, step); 526 503 fallback = 0; 527 504 goto find_block; 528 505 }
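align_idx()/align_off() above align an offset relative to the node's start rather than in absolute terms, so base + idx ends up aligned even when the node itself does not start on an aligned boundary. A worked standalone example with made-up numbers:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Align idx so that base + idx is aligned, as align_idx() above does;
 * aligning idx alone is not enough when base is not a multiple of step. */
static unsigned long align_idx(unsigned long base, unsigned long idx,
                               unsigned long step)
{
        return ALIGN(base + idx, step) - base;
}

int main(void)
{
        unsigned long base = 5;         /* e.g. node_min_pfn, not step-aligned */
        unsigned long idx = 2, step = 4;

        /* naive: idx becomes 4, but base + 4 = 9 is still misaligned */
        printf("naive: idx %lu -> pfn %lu\n",
               ALIGN(idx, step), base + ALIGN(idx, step));
        /* fixed: idx becomes 3, and base + 3 = 8 is a multiple of step */
        printf("fixed: idx %lu -> pfn %lu\n",
               align_idx(base, idx, step), base + align_idx(base, idx, step));
        return 0;
}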
+50 -15
mm/filemap_xip.c
··· 15 15 #include <linux/rmap.h> 16 16 #include <linux/mmu_notifier.h> 17 17 #include <linux/sched.h> 18 + #include <linux/seqlock.h> 19 + #include <linux/mutex.h> 18 20 #include <asm/tlbflush.h> 19 21 #include <asm/io.h> 20 22 ··· 24 22 * We do use our own empty page to avoid interference with other users 25 23 * of ZERO_PAGE(), such as /dev/zero 26 24 */ 25 + static DEFINE_MUTEX(xip_sparse_mutex); 26 + static seqcount_t xip_sparse_seq = SEQCNT_ZERO; 27 27 static struct page *__xip_sparse_page; 28 28 29 + /* called under xip_sparse_mutex */ 29 30 static struct page *xip_sparse_page(void) 30 31 { 31 32 if (!__xip_sparse_page) { 32 33 struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); 33 34 34 - if (page) { 35 - static DEFINE_SPINLOCK(xip_alloc_lock); 36 - spin_lock(&xip_alloc_lock); 37 - if (!__xip_sparse_page) 38 - __xip_sparse_page = page; 39 - else 40 - __free_page(page); 41 - spin_unlock(&xip_alloc_lock); 42 - } 35 + if (page) 36 + __xip_sparse_page = page; 43 37 } 44 38 return __xip_sparse_page; 45 39 } ··· 172 174 pte_t pteval; 173 175 spinlock_t *ptl; 174 176 struct page *page; 177 + unsigned count; 178 + int locked = 0; 179 + 180 + count = read_seqcount_begin(&xip_sparse_seq); 175 181 176 182 page = __xip_sparse_page; 177 183 if (!page) 178 184 return; 179 185 186 + retry: 180 187 spin_lock(&mapping->i_mmap_lock); 181 188 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 182 189 mm = vma->vm_mm; 183 190 address = vma->vm_start + 184 191 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 185 192 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 186 - pte = page_check_address(page, mm, address, &ptl); 193 + pte = page_check_address(page, mm, address, &ptl, 1); 187 194 if (pte) { 188 195 /* Nuke the page table entry. */ 189 196 flush_cache_page(vma, address, pte_pfn(*pte)); ··· 201 198 } 202 199 } 203 200 spin_unlock(&mapping->i_mmap_lock); 201 + 202 + if (locked) { 203 + mutex_unlock(&xip_sparse_mutex); 204 + } else if (read_seqcount_retry(&xip_sparse_seq, count)) { 205 + mutex_lock(&xip_sparse_mutex); 206 + locked = 1; 207 + goto retry; 208 + } 204 209 } 205 210 206 211 /* ··· 229 218 int error; 230 219 231 220 /* XXX: are VM_FAULT_ codes OK? 
*/ 232 - 221 + again: 233 222 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 234 223 if (vmf->pgoff >= size) 235 224 return VM_FAULT_SIGBUS; ··· 248 237 int err; 249 238 250 239 /* maybe shared writable, allocate new block */ 240 + mutex_lock(&xip_sparse_mutex); 251 241 error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1, 252 242 &xip_mem, &xip_pfn); 243 + mutex_unlock(&xip_sparse_mutex); 253 244 if (error) 254 245 return VM_FAULT_SIGBUS; 255 246 /* unmap sparse mappings at pgoff from all other vmas */ ··· 265 252 BUG_ON(err); 266 253 return VM_FAULT_NOPAGE; 267 254 } else { 255 + int err, ret = VM_FAULT_OOM; 256 + 257 + mutex_lock(&xip_sparse_mutex); 258 + write_seqcount_begin(&xip_sparse_seq); 259 + error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0, 260 + &xip_mem, &xip_pfn); 261 + if (unlikely(!error)) { 262 + write_seqcount_end(&xip_sparse_seq); 263 + mutex_unlock(&xip_sparse_mutex); 264 + goto again; 265 + } 266 + if (error != -ENODATA) 267 + goto out; 268 268 /* not shared and writable, use xip_sparse_page() */ 269 269 page = xip_sparse_page(); 270 270 if (!page) 271 - return VM_FAULT_OOM; 271 + goto out; 272 + err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, 273 + page); 274 + if (err == -ENOMEM) 275 + goto out; 272 276 273 - page_cache_get(page); 274 - vmf->page = page; 275 - return 0; 277 + ret = VM_FAULT_NOPAGE; 278 + out: 279 + write_seqcount_end(&xip_sparse_seq); 280 + mutex_unlock(&xip_sparse_mutex); 281 + 282 + return ret; 276 283 } 277 284 } 278 285 ··· 341 308 &xip_mem, &xip_pfn); 342 309 if (status == -ENODATA) { 343 310 /* we allocate a new page unmap it */ 311 + mutex_lock(&xip_sparse_mutex); 344 312 status = a_ops->get_xip_mem(mapping, index, 1, 345 313 &xip_mem, &xip_pfn); 314 + mutex_unlock(&xip_sparse_mutex); 346 315 if (!status) 347 316 /* unmap page at pgoff from all other vmas */ 348 317 __xip_unmap(mapping, index);
+1 -1
mm/mm_init.c
··· 12 12 #include "internal.h" 13 13 14 14 #ifdef CONFIG_DEBUG_MEMORY_INIT 15 - int __meminitdata mminit_loglevel; 15 + int mminit_loglevel; 16 16 17 17 #ifndef SECTIONS_SHIFT 18 18 #define SECTIONS_SHIFT 0
+25 -14
mm/rmap.c
··· 224 224 /* 225 225 * Check that @page is mapped at @address into @mm. 226 226 * 227 + * If @sync is false, page_check_address may perform a racy check to avoid 228 + * the page table lock when the pte is not present (helpful when reclaiming 229 + * highly shared pages). 230 + * 227 231 * On success returns with pte mapped and locked. 228 232 */ 229 233 pte_t *page_check_address(struct page *page, struct mm_struct *mm, 230 - unsigned long address, spinlock_t **ptlp) 234 + unsigned long address, spinlock_t **ptlp, int sync) 231 235 { 232 236 pgd_t *pgd; 233 237 pud_t *pud; ··· 253 249 254 250 pte = pte_offset_map(pmd, address); 255 251 /* Make a quick check before getting the lock */ 256 - if (!pte_present(*pte)) { 252 + if (!sync && !pte_present(*pte)) { 257 253 pte_unmap(pte); 258 254 return NULL; 259 255 } ··· 285 281 if (address == -EFAULT) 286 282 goto out; 287 283 288 - pte = page_check_address(page, mm, address, &ptl); 284 + pte = page_check_address(page, mm, address, &ptl, 0); 289 285 if (!pte) 290 286 goto out; 291 287 ··· 454 450 if (address == -EFAULT) 455 451 goto out; 456 452 457 - pte = page_check_address(page, mm, address, &ptl); 453 + pte = page_check_address(page, mm, address, &ptl, 1); 458 454 if (!pte) 459 455 goto out; 460 456 ··· 663 659 } 664 660 665 661 /* 662 + * Now that the last pte has gone, s390 must transfer dirty 663 + * flag from storage key to struct page. We can usually skip 664 + * this if the page is anon, so about to be freed; but perhaps 665 + * not if it's in swapcache - there might be another pte slot 666 + * containing the swap entry, but page not yet written to swap. 667 + */ 668 + if ((!PageAnon(page) || PageSwapCache(page)) && 669 + page_test_dirty(page)) { 670 + page_clear_dirty(page); 671 + set_page_dirty(page); 672 + } 673 + 674 + mem_cgroup_uncharge_page(page); 675 + __dec_zone_page_state(page, 676 + PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); 677 + /* 666 678 * It would be tidy to reset the PageAnon mapping here, 667 679 * but that might overwrite a racing page_add_anon_rmap 668 680 * which increments mapcount after us but sets mapping ··· 687 667 * Leaving it set also helps swapoff to reinstate ptes 688 668 * faster for those pages still in swapcache. 689 669 */ 690 - if ((!PageAnon(page) || PageSwapCache(page)) && 691 - page_test_dirty(page)) { 692 - page_clear_dirty(page); 693 - set_page_dirty(page); 694 - } 695 - mem_cgroup_uncharge_page(page); 696 - 697 - __dec_zone_page_state(page, 698 - PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); 699 670 } 700 671 } 701 672 ··· 708 697 if (address == -EFAULT) 709 698 goto out; 710 699 711 - pte = page_check_address(page, mm, address, &ptl); 700 + pte = page_check_address(page, mm, address, &ptl, 0); 712 701 if (!pte) 713 702 goto out; 714 703
+1 -1
mm/swap_state.c
··· 60 60 printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n", 61 61 swap_cache_info.add_total, swap_cache_info.del_total, 62 62 swap_cache_info.find_success, swap_cache_info.find_total); 63 - printk("Free swap = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10)); 63 + printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10)); 64 64 printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10)); 65 65 } 66 66
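The one-character format fix above matters because the free-swap count is printed as a signed long after this change; if the count is ever negative, %lu would have shown a huge bogus number. Minimal illustration:

#include <stdio.h>

int main(void)
{
        long free_kb = -3 * 4;  /* a transiently negative count, in kB */

        printf("with %%lu: Free swap = %lukB\n", (unsigned long)free_kb);
        printf("with %%ld: Free swap = %ldkB\n", free_kb);
        return 0;
}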
+1 -1
net/bluetooth/af_bluetooth.c
··· 456 456 subsys_initcall(bt_init); 457 457 module_exit(bt_exit); 458 458 459 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 459 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 460 460 MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); 461 461 MODULE_VERSION(VERSION); 462 462 MODULE_LICENSE("GPL");
+1 -1
net/bluetooth/bnep/core.c
··· 736 736 module_param(compress_dst, bool, 0644); 737 737 MODULE_PARM_DESC(compress_dst, "Compress destination headers"); 738 738 739 - MODULE_AUTHOR("David Libault <david.libault@inventel.fr>, Maxim Krasnyansky <maxk@qualcomm.com>"); 739 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 740 740 MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION); 741 741 MODULE_VERSION(VERSION); 742 742 MODULE_LICENSE("GPL");
+189 -187
net/bluetooth/hci_sysfs.c
··· 3 3 #include <linux/kernel.h> 4 4 #include <linux/init.h> 5 5 6 - #include <linux/platform_device.h> 7 - 8 6 #include <net/bluetooth/bluetooth.h> 9 7 #include <net/bluetooth/hci_core.h> 10 8 ··· 10 12 #undef BT_DBG 11 13 #define BT_DBG(D...) 12 14 #endif 15 + 16 + struct class *bt_class = NULL; 17 + EXPORT_SYMBOL_GPL(bt_class); 18 + 13 19 static struct workqueue_struct *btaddconn; 14 20 static struct workqueue_struct *btdelconn; 15 21 16 - static inline char *typetostr(int type) 22 + static inline char *link_typetostr(int type) 23 + { 24 + switch (type) { 25 + case ACL_LINK: 26 + return "ACL"; 27 + case SCO_LINK: 28 + return "SCO"; 29 + case ESCO_LINK: 30 + return "eSCO"; 31 + default: 32 + return "UNKNOWN"; 33 + } 34 + } 35 + 36 + static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 37 + { 38 + struct hci_conn *conn = dev_get_drvdata(dev); 39 + return sprintf(buf, "%s\n", link_typetostr(conn->type)); 40 + } 41 + 42 + static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 43 + { 44 + struct hci_conn *conn = dev_get_drvdata(dev); 45 + bdaddr_t bdaddr; 46 + baswap(&bdaddr, &conn->dst); 47 + return sprintf(buf, "%s\n", batostr(&bdaddr)); 48 + } 49 + 50 + static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 51 + { 52 + struct hci_conn *conn = dev_get_drvdata(dev); 53 + 54 + return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 55 + conn->features[0], conn->features[1], 56 + conn->features[2], conn->features[3], 57 + conn->features[4], conn->features[5], 58 + conn->features[6], conn->features[7]); 59 + } 60 + 61 + #define LINK_ATTR(_name,_mode,_show,_store) \ 62 + struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) 63 + 64 + static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 65 + static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 66 + static LINK_ATTR(features, S_IRUGO, show_link_features, NULL); 67 + 68 + static struct attribute *bt_link_attrs[] = { 69 + &link_attr_type.attr, 70 + &link_attr_address.attr, 71 + &link_attr_features.attr, 72 + NULL 73 + }; 74 + 75 + static struct attribute_group bt_link_group = { 76 + .attrs = bt_link_attrs, 77 + }; 78 + 79 + static struct attribute_group *bt_link_groups[] = { 80 + &bt_link_group, 81 + NULL 82 + }; 83 + 84 + static void bt_link_release(struct device *dev) 85 + { 86 + void *data = dev_get_drvdata(dev); 87 + kfree(data); 88 + } 89 + 90 + static struct device_type bt_link = { 91 + .name = "link", 92 + .groups = bt_link_groups, 93 + .release = bt_link_release, 94 + }; 95 + 96 + static void add_conn(struct work_struct *work) 97 + { 98 + struct hci_conn *conn = container_of(work, struct hci_conn, work); 99 + 100 + flush_workqueue(btdelconn); 101 + 102 + if (device_add(&conn->dev) < 0) { 103 + BT_ERR("Failed to register connection device"); 104 + return; 105 + } 106 + } 107 + 108 + void hci_conn_add_sysfs(struct hci_conn *conn) 109 + { 110 + struct hci_dev *hdev = conn->hdev; 111 + 112 + BT_DBG("conn %p", conn); 113 + 114 + conn->dev.type = &bt_link; 115 + conn->dev.class = bt_class; 116 + conn->dev.parent = &hdev->dev; 117 + 118 + snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d", 119 + hdev->name, conn->handle); 120 + 121 + dev_set_drvdata(&conn->dev, conn); 122 + 123 + device_initialize(&conn->dev); 124 + 125 + INIT_WORK(&conn->work, add_conn); 126 + 127 + queue_work(btaddconn, &conn->work); 128 + } 129 + 130 + /* 131 + * The rfcomm tty device will possibly retain even when conn 132 + 
* is down, and sysfs doesn't support move zombie device, 133 + * so we should move the device before conn device is destroyed. 134 + */ 135 + static int __match_tty(struct device *dev, void *data) 136 + { 137 + return !strncmp(dev->bus_id, "rfcomm", 6); 138 + } 139 + 140 + static void del_conn(struct work_struct *work) 141 + { 142 + struct hci_conn *conn = container_of(work, struct hci_conn, work); 143 + struct hci_dev *hdev = conn->hdev; 144 + 145 + while (1) { 146 + struct device *dev; 147 + 148 + dev = device_find_child(&conn->dev, NULL, __match_tty); 149 + if (!dev) 150 + break; 151 + device_move(dev, NULL); 152 + put_device(dev); 153 + } 154 + 155 + device_del(&conn->dev); 156 + put_device(&conn->dev); 157 + hci_dev_put(hdev); 158 + } 159 + 160 + void hci_conn_del_sysfs(struct hci_conn *conn) 161 + { 162 + BT_DBG("conn %p", conn); 163 + 164 + if (!device_is_registered(&conn->dev)) 165 + return; 166 + 167 + INIT_WORK(&conn->work, del_conn); 168 + 169 + queue_work(btdelconn, &conn->work); 170 + } 171 + 172 + static inline char *host_typetostr(int type) 17 173 { 18 174 switch (type) { 19 175 case HCI_VIRTUAL: ··· 192 40 static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 193 41 { 194 42 struct hci_dev *hdev = dev_get_drvdata(dev); 195 - return sprintf(buf, "%s\n", typetostr(hdev->type)); 43 + return sprintf(buf, "%s\n", host_typetostr(hdev->type)); 196 44 } 197 45 198 46 static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) ··· 373 221 static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, 374 222 show_sniff_min_interval, store_sniff_min_interval); 375 223 376 - static struct device_attribute *bt_attrs[] = { 377 - &dev_attr_type, 378 - &dev_attr_name, 379 - &dev_attr_class, 380 - &dev_attr_address, 381 - &dev_attr_features, 382 - &dev_attr_manufacturer, 383 - &dev_attr_hci_version, 384 - &dev_attr_hci_revision, 385 - &dev_attr_inquiry_cache, 386 - &dev_attr_idle_timeout, 387 - &dev_attr_sniff_max_interval, 388 - &dev_attr_sniff_min_interval, 224 + static struct attribute *bt_host_attrs[] = { 225 + &dev_attr_type.attr, 226 + &dev_attr_name.attr, 227 + &dev_attr_class.attr, 228 + &dev_attr_address.attr, 229 + &dev_attr_features.attr, 230 + &dev_attr_manufacturer.attr, 231 + &dev_attr_hci_version.attr, 232 + &dev_attr_hci_revision.attr, 233 + &dev_attr_inquiry_cache.attr, 234 + &dev_attr_idle_timeout.attr, 235 + &dev_attr_sniff_max_interval.attr, 236 + &dev_attr_sniff_min_interval.attr, 389 237 NULL 390 238 }; 391 239 392 - static ssize_t show_conn_type(struct device *dev, struct device_attribute *attr, char *buf) 393 - { 394 - struct hci_conn *conn = dev_get_drvdata(dev); 395 - return sprintf(buf, "%s\n", conn->type == ACL_LINK ? 
"ACL" : "SCO"); 396 - } 240 + static struct attribute_group bt_host_group = { 241 + .attrs = bt_host_attrs, 242 + }; 397 243 398 - static ssize_t show_conn_address(struct device *dev, struct device_attribute *attr, char *buf) 399 - { 400 - struct hci_conn *conn = dev_get_drvdata(dev); 401 - bdaddr_t bdaddr; 402 - baswap(&bdaddr, &conn->dst); 403 - return sprintf(buf, "%s\n", batostr(&bdaddr)); 404 - } 405 - 406 - static ssize_t show_conn_features(struct device *dev, struct device_attribute *attr, char *buf) 407 - { 408 - struct hci_conn *conn = dev_get_drvdata(dev); 409 - 410 - return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 411 - conn->features[0], conn->features[1], 412 - conn->features[2], conn->features[3], 413 - conn->features[4], conn->features[5], 414 - conn->features[6], conn->features[7]); 415 - } 416 - 417 - #define CONN_ATTR(_name,_mode,_show,_store) \ 418 - struct device_attribute conn_attr_##_name = __ATTR(_name,_mode,_show,_store) 419 - 420 - static CONN_ATTR(type, S_IRUGO, show_conn_type, NULL); 421 - static CONN_ATTR(address, S_IRUGO, show_conn_address, NULL); 422 - static CONN_ATTR(features, S_IRUGO, show_conn_features, NULL); 423 - 424 - static struct device_attribute *conn_attrs[] = { 425 - &conn_attr_type, 426 - &conn_attr_address, 427 - &conn_attr_features, 244 + static struct attribute_group *bt_host_groups[] = { 245 + &bt_host_group, 428 246 NULL 429 247 }; 430 248 431 - struct class *bt_class = NULL; 432 - EXPORT_SYMBOL_GPL(bt_class); 433 - 434 - static struct bus_type bt_bus = { 435 - .name = "bluetooth", 436 - }; 437 - 438 - static struct platform_device *bt_platform; 439 - 440 - static void bt_release(struct device *dev) 249 + static void bt_host_release(struct device *dev) 441 250 { 442 251 void *data = dev_get_drvdata(dev); 443 252 kfree(data); 444 253 } 445 254 446 - static void add_conn(struct work_struct *work) 447 - { 448 - struct hci_conn *conn = container_of(work, struct hci_conn, work); 449 - int i; 450 - 451 - flush_workqueue(btdelconn); 452 - 453 - if (device_add(&conn->dev) < 0) { 454 - BT_ERR("Failed to register connection device"); 455 - return; 456 - } 457 - 458 - for (i = 0; conn_attrs[i]; i++) 459 - if (device_create_file(&conn->dev, conn_attrs[i]) < 0) 460 - BT_ERR("Failed to create connection attribute"); 461 - } 462 - 463 - void hci_conn_add_sysfs(struct hci_conn *conn) 464 - { 465 - struct hci_dev *hdev = conn->hdev; 466 - 467 - BT_DBG("conn %p", conn); 468 - 469 - conn->dev.bus = &bt_bus; 470 - conn->dev.parent = &hdev->dev; 471 - 472 - conn->dev.release = bt_release; 473 - 474 - snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d", 475 - hdev->name, conn->handle); 476 - 477 - dev_set_drvdata(&conn->dev, conn); 478 - 479 - device_initialize(&conn->dev); 480 - 481 - INIT_WORK(&conn->work, add_conn); 482 - 483 - queue_work(btaddconn, &conn->work); 484 - } 485 - 486 - /* 487 - * The rfcomm tty device will possibly retain even when conn 488 - * is down, and sysfs doesn't support move zombie device, 489 - * so we should move the device before conn device is destroyed. 
490 - */ 491 - static int __match_tty(struct device *dev, void *data) 492 - { 493 - return !strncmp(dev->bus_id, "rfcomm", 6); 494 - } 495 - 496 - static void del_conn(struct work_struct *work) 497 - { 498 - struct hci_conn *conn = container_of(work, struct hci_conn, work); 499 - struct hci_dev *hdev = conn->hdev; 500 - 501 - while (1) { 502 - struct device *dev; 503 - 504 - dev = device_find_child(&conn->dev, NULL, __match_tty); 505 - if (!dev) 506 - break; 507 - device_move(dev, NULL); 508 - put_device(dev); 509 - } 510 - 511 - device_del(&conn->dev); 512 - put_device(&conn->dev); 513 - hci_dev_put(hdev); 514 - } 515 - 516 - void hci_conn_del_sysfs(struct hci_conn *conn) 517 - { 518 - BT_DBG("conn %p", conn); 519 - 520 - if (!device_is_registered(&conn->dev)) 521 - return; 522 - 523 - INIT_WORK(&conn->work, del_conn); 524 - 525 - queue_work(btdelconn, &conn->work); 526 - } 255 + static struct device_type bt_host = { 256 + .name = "host", 257 + .groups = bt_host_groups, 258 + .release = bt_host_release, 259 + }; 527 260 528 261 int hci_register_sysfs(struct hci_dev *hdev) 529 262 { 530 263 struct device *dev = &hdev->dev; 531 - unsigned int i; 532 264 int err; 533 265 534 266 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 535 267 536 - dev->bus = &bt_bus; 268 + dev->type = &bt_host; 269 + dev->class = bt_class; 537 270 dev->parent = hdev->parent; 538 271 539 272 strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE); 540 - 541 - dev->release = bt_release; 542 273 543 274 dev_set_drvdata(dev, hdev); 544 275 545 276 err = device_register(dev); 546 277 if (err < 0) 547 278 return err; 548 - 549 - for (i = 0; bt_attrs[i]; i++) 550 - if (device_create_file(dev, bt_attrs[i]) < 0) 551 - BT_ERR("Failed to create device attribute"); 552 279 553 280 return 0; 554 281 } ··· 441 410 442 411 int __init bt_sysfs_init(void) 443 412 { 444 - int err; 445 - 446 413 btaddconn = create_singlethread_workqueue("btaddconn"); 447 - if (!btaddconn) { 448 - err = -ENOMEM; 449 - goto out; 450 - } 414 + if (!btaddconn) 415 + return -ENOMEM; 451 416 452 417 btdelconn = create_singlethread_workqueue("btdelconn"); 453 418 if (!btdelconn) { 454 - err = -ENOMEM; 455 - goto out_del; 419 + destroy_workqueue(btaddconn); 420 + return -ENOMEM; 456 421 } 457 - 458 - bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0); 459 - if (IS_ERR(bt_platform)) { 460 - err = PTR_ERR(bt_platform); 461 - goto out_platform; 462 - } 463 - 464 - err = bus_register(&bt_bus); 465 - if (err < 0) 466 - goto out_bus; 467 422 468 423 bt_class = class_create(THIS_MODULE, "bluetooth"); 469 424 if (IS_ERR(bt_class)) { 470 - err = PTR_ERR(bt_class); 471 - goto out_class; 425 + destroy_workqueue(btdelconn); 426 + destroy_workqueue(btaddconn); 427 + return PTR_ERR(bt_class); 472 428 } 473 429 474 430 return 0; 475 - 476 - out_class: 477 - bus_unregister(&bt_bus); 478 - out_bus: 479 - platform_device_unregister(bt_platform); 480 - out_platform: 481 - destroy_workqueue(btdelconn); 482 - out_del: 483 - destroy_workqueue(btaddconn); 484 - out: 485 - return err; 486 431 } 487 432 488 433 void bt_sysfs_cleanup(void) 489 434 { 490 435 destroy_workqueue(btaddconn); 491 - 492 436 destroy_workqueue(btdelconn); 493 437 494 438 class_destroy(bt_class); 495 - 496 - bus_unregister(&bt_bus); 497 - 498 - platform_device_unregister(bt_platform); 499 439 }
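The hci_sysfs.c rework above drops the ad-hoc "bluetooth" bus and platform device and hangs the connection and host attributes off attribute groups in a struct device_type, so the driver core creates and removes the sysfs files together with the device itself instead of looping over device_create_file(). A minimal sketch of that pattern follows; the "foo" type, its single attribute and the placeholder value are hypothetical, not part of the patch.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t show_foo_state(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* Placeholder value; a real driver would derive it from drvdata. */
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR(state, S_IRUGO, show_foo_state, NULL);

static struct attribute *foo_attrs[] = {
	&dev_attr_state.attr,
	NULL
};

static struct attribute_group foo_group = {
	.attrs = foo_attrs,
};

static struct attribute_group *foo_groups[] = {
	&foo_group,
	NULL
};

static struct device_type foo_type = {
	.name	= "foo",
	.groups	= foo_groups,
};

/*
 * A driver then only sets dev->type = &foo_type (plus dev->class if wanted)
 * before device_register(); no per-attribute error handling is needed.
 */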
+1 -1
net/bluetooth/l2cap.c
··· 2516 2516 module_init(l2cap_init); 2517 2517 module_exit(l2cap_exit); 2518 2518 2519 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 2519 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 2520 2520 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); 2521 2521 MODULE_VERSION(VERSION); 2522 2522 MODULE_LICENSE("GPL");
+1 -1
net/bluetooth/rfcomm/core.c
··· 2115 2115 module_param(l2cap_mtu, uint, 0644); 2116 2116 MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); 2117 2117 2118 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 2118 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 2119 2119 MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); 2120 2120 MODULE_VERSION(VERSION); 2121 2121 MODULE_LICENSE("GPL");
+1 -1
net/bluetooth/sco.c
··· 1002 1002 module_param(disable_esco, bool, 0644); 1003 1003 MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); 1004 1004 1005 - MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); 1005 + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 1006 1006 MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION); 1007 1007 MODULE_VERSION(VERSION); 1008 1008 MODULE_LICENSE("GPL");
+10 -5
net/bridge/br_device.c
··· 148 148 } 149 149 150 150 static struct ethtool_ops br_ethtool_ops = { 151 - .get_drvinfo = br_getinfo, 152 - .get_link = ethtool_op_get_link, 153 - .set_sg = br_set_sg, 154 - .set_tx_csum = br_set_tx_csum, 155 - .set_tso = br_set_tso, 151 + .get_drvinfo = br_getinfo, 152 + .get_link = ethtool_op_get_link, 153 + .get_tx_csum = ethtool_op_get_tx_csum, 154 + .set_tx_csum = br_set_tx_csum, 155 + .get_sg = ethtool_op_get_sg, 156 + .set_sg = br_set_sg, 157 + .get_tso = ethtool_op_get_tso, 158 + .set_tso = br_set_tso, 159 + .get_ufo = ethtool_op_get_ufo, 160 + .get_flags = ethtool_op_get_flags, 156 161 }; 157 162 158 163 void br_dev_setup(struct net_device *dev)
+87
net/core/datagram.c
··· 339 339 return -EFAULT; 340 340 } 341 341 342 + /** 343 + * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. 344 + * @skb: buffer to copy 345 + * @offset: offset in the buffer to start copying to 346 + * @from: io vector to copy to 347 + * @len: amount of data to copy to buffer from iovec 348 + * 349 + * Returns 0 or -EFAULT. 350 + * Note: the iovec is modified during the copy. 351 + */ 352 + int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, 353 + struct iovec *from, int len) 354 + { 355 + int start = skb_headlen(skb); 356 + int i, copy = start - offset; 357 + 358 + /* Copy header. */ 359 + if (copy > 0) { 360 + if (copy > len) 361 + copy = len; 362 + if (memcpy_fromiovec(skb->data + offset, from, copy)) 363 + goto fault; 364 + if ((len -= copy) == 0) 365 + return 0; 366 + offset += copy; 367 + } 368 + 369 + /* Copy paged appendix. Hmm... why does this look so complicated? */ 370 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 371 + int end; 372 + 373 + WARN_ON(start > offset + len); 374 + 375 + end = start + skb_shinfo(skb)->frags[i].size; 376 + if ((copy = end - offset) > 0) { 377 + int err; 378 + u8 *vaddr; 379 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 380 + struct page *page = frag->page; 381 + 382 + if (copy > len) 383 + copy = len; 384 + vaddr = kmap(page); 385 + err = memcpy_fromiovec(vaddr + frag->page_offset + 386 + offset - start, from, copy); 387 + kunmap(page); 388 + if (err) 389 + goto fault; 390 + 391 + if (!(len -= copy)) 392 + return 0; 393 + offset += copy; 394 + } 395 + start = end; 396 + } 397 + 398 + if (skb_shinfo(skb)->frag_list) { 399 + struct sk_buff *list = skb_shinfo(skb)->frag_list; 400 + 401 + for (; list; list = list->next) { 402 + int end; 403 + 404 + WARN_ON(start > offset + len); 405 + 406 + end = start + list->len; 407 + if ((copy = end - offset) > 0) { 408 + if (copy > len) 409 + copy = len; 410 + if (skb_copy_datagram_from_iovec(list, 411 + offset - start, 412 + from, copy)) 413 + goto fault; 414 + if ((len -= copy) == 0) 415 + return 0; 416 + offset += copy; 417 + } 418 + start = end; 419 + } 420 + } 421 + if (!len) 422 + return 0; 423 + 424 + fault: 425 + return -EFAULT; 426 + } 427 + EXPORT_SYMBOL(skb_copy_datagram_from_iovec); 428 + 342 429 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, 343 430 u8 __user *to, int len, 344 431 __wsum *csump)
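The skb_copy_datagram_from_iovec() helper added above is the mirror of skb_copy_datagram_iovec(): it copies data from a user iovec into an skb, covering the linear area, the page frags and the frag list. A hedged sketch of a caller follows; the helper name and header length are hypothetical, and the skb is assumed to have been sized for hdr_len + len bytes already.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>

/*
 * Copy len bytes of payload from the sender's iovec into the skb, right
 * after a (hypothetical) hdr_len-byte header. As documented above, the
 * iovec is consumed by the copy.
 */
static int example_fill_skb(struct sk_buff *skb, struct msghdr *msg,
			    unsigned int hdr_len, unsigned int len)
{
	if (skb_copy_datagram_from_iovec(skb, hdr_len, msg->msg_iov, len))
		return -EFAULT;

	return 0;
}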
+30 -19
net/core/dev.c
··· 1339 1339 } 1340 1340 1341 1341 1342 + static inline void __netif_reschedule(struct Qdisc *q) 1343 + { 1344 + struct softnet_data *sd; 1345 + unsigned long flags; 1346 + 1347 + local_irq_save(flags); 1348 + sd = &__get_cpu_var(softnet_data); 1349 + q->next_sched = sd->output_queue; 1350 + sd->output_queue = q; 1351 + raise_softirq_irqoff(NET_TX_SOFTIRQ); 1352 + local_irq_restore(flags); 1353 + } 1354 + 1342 1355 void __netif_schedule(struct Qdisc *q) 1343 1356 { 1344 - if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) { 1345 - struct softnet_data *sd; 1346 - unsigned long flags; 1347 - 1348 - local_irq_save(flags); 1349 - sd = &__get_cpu_var(softnet_data); 1350 - q->next_sched = sd->output_queue; 1351 - sd->output_queue = q; 1352 - raise_softirq_irqoff(NET_TX_SOFTIRQ); 1353 - local_irq_restore(flags); 1354 - } 1357 + if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 1358 + __netif_reschedule(q); 1355 1359 } 1356 1360 EXPORT_SYMBOL(__netif_schedule); 1357 1361 ··· 1804 1800 1805 1801 spin_lock(root_lock); 1806 1802 1807 - rc = qdisc_enqueue_root(skb, q); 1808 - qdisc_run(q); 1809 - 1803 + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 1804 + kfree_skb(skb); 1805 + rc = NET_XMIT_DROP; 1806 + } else { 1807 + rc = qdisc_enqueue_root(skb, q); 1808 + qdisc_run(q); 1809 + } 1810 1810 spin_unlock(root_lock); 1811 1811 1812 1812 goto out; ··· 1982 1974 1983 1975 head = head->next_sched; 1984 1976 1985 - smp_mb__before_clear_bit(); 1986 - clear_bit(__QDISC_STATE_SCHED, &q->state); 1987 - 1988 1977 root_lock = qdisc_lock(q); 1989 1978 if (spin_trylock(root_lock)) { 1979 + smp_mb__before_clear_bit(); 1980 + clear_bit(__QDISC_STATE_SCHED, 1981 + &q->state); 1990 1982 qdisc_run(q); 1991 1983 spin_unlock(root_lock); 1992 1984 } else { 1993 - __netif_schedule(q); 1985 + if (!test_bit(__QDISC_STATE_DEACTIVATED, 1986 + &q->state)) 1987 + __netif_reschedule(q); 1994 1988 } 1995 1989 } 1996 1990 } ··· 2094 2084 q = rxq->qdisc; 2095 2085 if (q != &noop_qdisc) { 2096 2086 spin_lock(qdisc_lock(q)); 2097 - result = qdisc_enqueue_root(skb, q); 2087 + if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 2088 + result = qdisc_enqueue_root(skb, q); 2098 2089 spin_unlock(qdisc_lock(q)); 2099 2090 } 2100 2091
+4 -5
net/core/gen_estimator.c
··· 99 99 100 100 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; 101 101 102 - /* Protects against NULL dereference and RCU write-side */ 102 + /* Protects against NULL dereference */ 103 103 static DEFINE_RWLOCK(est_lock); 104 104 105 105 static void est_timer(unsigned long arg) ··· 185 185 est->last_packets = bstats->packets; 186 186 est->avpps = rate_est->pps<<10; 187 187 188 - write_lock_bh(&est_lock); 189 188 if (!elist[idx].timer.function) { 190 189 INIT_LIST_HEAD(&elist[idx].list); 191 190 setup_timer(&elist[idx].timer, est_timer, idx); ··· 194 195 mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); 195 196 196 197 list_add_rcu(&est->list, &elist[idx].list); 197 - write_unlock_bh(&est_lock); 198 198 return 0; 199 199 } 200 200 ··· 212 214 * Removes the rate estimator specified by &bstats and &rate_est 213 215 * and deletes the timer. 214 216 * 217 + * NOTE: Called under rtnl_mutex 215 218 */ 216 219 void gen_kill_estimator(struct gnet_stats_basic *bstats, 217 220 struct gnet_stats_rate_est *rate_est) ··· 226 227 if (!elist[idx].timer.function) 227 228 continue; 228 229 229 - write_lock_bh(&est_lock); 230 230 list_for_each_entry_safe(e, n, &elist[idx].list, list) { 231 231 if (e->rate_est != rate_est || e->bstats != bstats) 232 232 continue; 233 233 234 + write_lock_bh(&est_lock); 234 235 e->bstats = NULL; 236 + write_unlock_bh(&est_lock); 235 237 236 238 list_del_rcu(&e->list); 237 239 call_rcu(&e->e_rcu, __gen_kill_estimator); 238 240 } 239 - write_unlock_bh(&est_lock); 240 241 } 241 242 } 242 243
+2 -10
net/core/skbuff.c
··· 2256 2256 segs = nskb; 2257 2257 tail = nskb; 2258 2258 2259 - nskb->dev = skb->dev; 2260 - skb_copy_queue_mapping(nskb, skb); 2261 - nskb->priority = skb->priority; 2262 - nskb->protocol = skb->protocol; 2263 - nskb->vlan_tci = skb->vlan_tci; 2264 - nskb->dst = dst_clone(skb->dst); 2265 - memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 2266 - nskb->pkt_type = skb->pkt_type; 2259 + __copy_skb_header(nskb, skb); 2267 2260 nskb->mac_len = skb->mac_len; 2268 2261 2269 2262 skb_reserve(nskb, headroom); ··· 2267 2274 skb_copy_from_linear_data(skb, skb_put(nskb, doffset), 2268 2275 doffset); 2269 2276 if (!sg) { 2277 + nskb->ip_summed = CHECKSUM_NONE; 2270 2278 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2271 2279 skb_put(nskb, len), 2272 2280 len, 0); ··· 2277 2283 frag = skb_shinfo(nskb)->frags; 2278 2284 k = 0; 2279 2285 2280 - nskb->ip_summed = CHECKSUM_PARTIAL; 2281 - nskb->csum = skb->csum; 2282 2286 skb_copy_from_linear_data_offset(skb, offset, 2283 2287 skb_put(nskb, hsize), hsize); 2284 2288
+6 -6
net/dccp/input.c
··· 411 411 struct dccp_sock *dp = dccp_sk(sk); 412 412 long tstamp = dccp_timestamp(); 413 413 414 - /* Stop the REQUEST timer */ 415 - inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 416 - WARN_ON(sk->sk_send_head == NULL); 417 - __kfree_skb(sk->sk_send_head); 418 - sk->sk_send_head = NULL; 419 - 420 414 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, 421 415 dp->dccps_awl, dp->dccps_awh)) { 422 416 dccp_pr_debug("invalid ackno: S.AWL=%llu, " ··· 434 440 DCCP_SKB_CB(skb)->dccpd_seq, 435 441 DCCP_ACKVEC_STATE_RECEIVED)) 436 442 goto out_invalid_packet; /* FIXME: change error code */ 443 + 444 + /* Stop the REQUEST timer */ 445 + inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 446 + WARN_ON(sk->sk_send_head == NULL); 447 + kfree_skb(sk->sk_send_head); 448 + sk->sk_send_head = NULL; 437 449 438 450 dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq; 439 451 dccp_update_gsr(sk, dp->dccps_isr);
+14 -8
net/ipv4/icmp.c
··· 204 204 return net->ipv4.icmp_sk[smp_processor_id()]; 205 205 } 206 206 207 - static inline int icmp_xmit_lock(struct sock *sk) 207 + static inline struct sock *icmp_xmit_lock(struct net *net) 208 208 { 209 + struct sock *sk; 210 + 209 211 local_bh_disable(); 212 + 213 + sk = icmp_sk(net); 210 214 211 215 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { 212 216 /* This can happen if the output path signals a 213 217 * dst_link_failure() for an outgoing ICMP packet. 214 218 */ 215 219 local_bh_enable(); 216 - return 1; 220 + return NULL; 217 221 } 218 - return 0; 222 + return sk; 219 223 } 220 224 221 225 static inline void icmp_xmit_unlock(struct sock *sk) ··· 358 354 struct ipcm_cookie ipc; 359 355 struct rtable *rt = skb->rtable; 360 356 struct net *net = dev_net(rt->u.dst.dev); 361 - struct sock *sk = icmp_sk(net); 362 - struct inet_sock *inet = inet_sk(sk); 357 + struct sock *sk; 358 + struct inet_sock *inet; 363 359 __be32 daddr; 364 360 365 361 if (ip_options_echo(&icmp_param->replyopts, skb)) 366 362 return; 367 363 368 - if (icmp_xmit_lock(sk)) 364 + sk = icmp_xmit_lock(net); 365 + if (sk == NULL) 369 366 return; 367 + inet = inet_sk(sk); 370 368 371 369 icmp_param->data.icmph.checksum = 0; 372 370 ··· 425 419 if (!rt) 426 420 goto out; 427 421 net = dev_net(rt->u.dst.dev); 428 - sk = icmp_sk(net); 429 422 430 423 /* 431 424 * Find the original header. It is expected to be valid, of course. ··· 488 483 } 489 484 } 490 485 491 - if (icmp_xmit_lock(sk)) 486 + sk = icmp_xmit_lock(net); 487 + if (sk == NULL) 492 488 return; 493 489 494 490 /*
+1 -1
net/ipv4/netfilter/ipt_addrtype.c
··· 70 70 (info->flags & IPT_ADDRTYPE_INVERT_SOURCE); 71 71 if (ret && info->dest) 72 72 ret &= match_type(dev, iph->daddr, info->dest) ^ 73 - (info->flags & IPT_ADDRTYPE_INVERT_DEST); 73 + !!(info->flags & IPT_ADDRTYPE_INVERT_DEST); 74 74 return ret; 75 75 } 76 76
+6 -2
net/ipv4/netfilter/nf_nat_proto_common.c
··· 73 73 range_size = ntohs(range->max.all) - min + 1; 74 74 } 75 75 76 - off = *rover; 77 76 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) 78 - off = net_random(); 77 + off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip, 78 + maniptype == IP_NAT_MANIP_SRC 79 + ? tuple->dst.u.all 80 + : tuple->src.u.all); 81 + else 82 + off = *rover; 79 83 80 84 for (i = 0; i < range_size; i++, off++) { 81 85 *portptr = htons(min + off % range_size);
+70 -6
net/ipv4/route.c
··· 2914 2914 return 0; 2915 2915 } 2916 2916 2917 + static void rt_secret_reschedule(int old) 2918 + { 2919 + struct net *net; 2920 + int new = ip_rt_secret_interval; 2921 + int diff = new - old; 2922 + 2923 + if (!diff) 2924 + return; 2925 + 2926 + rtnl_lock(); 2927 + for_each_net(net) { 2928 + int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); 2929 + 2930 + if (!new) 2931 + continue; 2932 + 2933 + if (deleted) { 2934 + long time = net->ipv4.rt_secret_timer.expires - jiffies; 2935 + 2936 + if (time <= 0 || (time += diff) <= 0) 2937 + time = 0; 2938 + 2939 + net->ipv4.rt_secret_timer.expires = time; 2940 + } else 2941 + net->ipv4.rt_secret_timer.expires = new; 2942 + 2943 + net->ipv4.rt_secret_timer.expires += jiffies; 2944 + add_timer(&net->ipv4.rt_secret_timer); 2945 + } 2946 + rtnl_unlock(); 2947 + } 2948 + 2949 + static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write, 2950 + struct file *filp, 2951 + void __user *buffer, size_t *lenp, 2952 + loff_t *ppos) 2953 + { 2954 + int old = ip_rt_secret_interval; 2955 + int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos); 2956 + 2957 + rt_secret_reschedule(old); 2958 + 2959 + return ret; 2960 + } 2961 + 2962 + static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table, 2963 + int __user *name, 2964 + int nlen, 2965 + void __user *oldval, 2966 + size_t __user *oldlenp, 2967 + void __user *newval, 2968 + size_t newlen) 2969 + { 2970 + int old = ip_rt_secret_interval; 2971 + int ret = sysctl_jiffies(table, name, nlen, oldval, oldlenp, newval, 2972 + newlen); 2973 + 2974 + rt_secret_reschedule(old); 2975 + 2976 + return ret; 2977 + } 2978 + 2917 2979 static ctl_table ipv4_route_table[] = { 2918 2980 { 2919 2981 .ctl_name = NET_IPV4_ROUTE_GC_THRESH, ··· 3110 3048 .data = &ip_rt_secret_interval, 3111 3049 .maxlen = sizeof(int), 3112 3050 .mode = 0644, 3113 - .proc_handler = &proc_dointvec_jiffies, 3114 - .strategy = &sysctl_jiffies, 3051 + .proc_handler = &ipv4_sysctl_rt_secret_interval, 3052 + .strategy = &ipv4_sysctl_rt_secret_interval_strategy, 3115 3053 }, 3116 3054 { .ctl_name = 0 } 3117 3055 }; ··· 3188 3126 net->ipv4.rt_secret_timer.data = (unsigned long)net; 3189 3127 init_timer_deferrable(&net->ipv4.rt_secret_timer); 3190 3128 3191 - net->ipv4.rt_secret_timer.expires = 3192 - jiffies + net_random() % ip_rt_secret_interval + 3193 - ip_rt_secret_interval; 3194 - add_timer(&net->ipv4.rt_secret_timer); 3129 + if (ip_rt_secret_interval) { 3130 + net->ipv4.rt_secret_timer.expires = 3131 + jiffies + net_random() % ip_rt_secret_interval + 3132 + ip_rt_secret_interval; 3133 + add_timer(&net->ipv4.rt_secret_timer); 3134 + } 3195 3135 return 0; 3196 3136 } 3197 3137
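The rt_secret_interval handlers added above wrap the stock jiffies conversion helpers so that writing the sysctl also reschedules the per-namespace secret timer. A minimal sketch of that wrap-the-generic-handler pattern, with hypothetical names and a printk standing in for the real rescheduling work:

#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/fs.h>

static int example_interval;	/* value exposed via the ctl_table entry */

static int example_interval_handler(ctl_table *ctl, int write,
				    struct file *filp, void __user *buffer,
				    size_t *lenp, loff_t *ppos)
{
	int old = example_interval;
	int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);

	/* React only after the generic handler has updated the value. */
	if (write && example_interval != old)
		printk(KERN_INFO "interval changed: %d -> %d\n",
		       old, example_interval);

	return ret;
}

The .proc_handler of the corresponding ctl_table entry (with .data pointing at example_interval) would then reference example_interval_handler instead of proc_dointvec_jiffies directly, exactly as the route.c table now points at ipv4_sysctl_rt_secret_interval.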
+2 -2
net/ipv6/addrconf.c
··· 1106 1106 return ret; 1107 1107 } 1108 1108 1109 - int ipv6_dev_get_saddr(struct net_device *dst_dev, 1109 + int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, 1110 1110 const struct in6_addr *daddr, unsigned int prefs, 1111 1111 struct in6_addr *saddr) 1112 1112 { 1113 1113 struct ipv6_saddr_score scores[2], 1114 1114 *score = &scores[0], *hiscore = &scores[1]; 1115 - struct net *net = dev_net(dst_dev); 1116 1115 struct ipv6_saddr_dst dst; 1117 1116 struct net_device *dev; 1118 1117 int dst_type; ··· 1688 1689 .fc_dst_len = plen, 1689 1690 .fc_flags = RTF_UP | flags, 1690 1691 .fc_nlinfo.nl_net = dev_net(dev), 1692 + .fc_protocol = RTPROT_KERNEL, 1691 1693 }; 1692 1694 1693 1695 ipv6_addr_copy(&cfg.fc_dst, pfx);
+2 -1
net/ipv6/fib6_rules.c
··· 93 93 if (flags & RT6_LOOKUP_F_SRCPREF_COA) 94 94 srcprefs |= IPV6_PREFER_SRC_COA; 95 95 96 - if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev, 96 + if (ipv6_dev_get_saddr(net, 97 + ip6_dst_idev(&rt->u.dst)->dev, 97 98 &flp->fl6_dst, srcprefs, 98 99 &saddr)) 99 100 goto again;
+12 -11
net/ipv6/icmp.c
··· 91 91 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 92 92 }; 93 93 94 - static __inline__ int icmpv6_xmit_lock(struct sock *sk) 94 + static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) 95 95 { 96 + struct sock *sk; 97 + 96 98 local_bh_disable(); 97 99 100 + sk = icmpv6_sk(net); 98 101 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { 99 102 /* This can happen if the output path (f.e. SIT or 100 103 * ip6ip6 tunnel) signals dst_link_failure() for an 101 104 * outgoing ICMP6 packet. 102 105 */ 103 106 local_bh_enable(); 104 - return 1; 107 + return NULL; 105 108 } 106 - return 0; 109 + return sk; 107 110 } 108 111 109 112 static __inline__ void icmpv6_xmit_unlock(struct sock *sk) ··· 395 392 fl.fl_icmp_code = code; 396 393 security_skb_classify_flow(skb, &fl); 397 394 398 - sk = icmpv6_sk(net); 399 - np = inet6_sk(sk); 400 - 401 - if (icmpv6_xmit_lock(sk)) 395 + sk = icmpv6_xmit_lock(net); 396 + if (sk == NULL) 402 397 return; 398 + np = inet6_sk(sk); 403 399 404 400 if (!icmpv6_xrlim_allow(sk, type, &fl)) 405 401 goto out; ··· 541 539 fl.fl_icmp_type = ICMPV6_ECHO_REPLY; 542 540 security_skb_classify_flow(skb, &fl); 543 541 544 - sk = icmpv6_sk(net); 545 - np = inet6_sk(sk); 546 - 547 - if (icmpv6_xmit_lock(sk)) 542 + sk = icmpv6_xmit_lock(net); 543 + if (sk == NULL) 548 544 return; 545 + np = inet6_sk(sk); 549 546 550 547 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) 551 548 fl.oif = np->mcast_oif;
+1
net/ipv6/ip6_fib.c
··· 378 378 379 379 arg.skb = skb; 380 380 arg.cb = cb; 381 + arg.net = net; 381 382 w->args = &arg; 382 383 383 384 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
+1 -1
net/ipv6/ip6_output.c
··· 934 934 goto out_err_release; 935 935 936 936 if (ipv6_addr_any(&fl->fl6_src)) { 937 - err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev, 937 + err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, 938 938 &fl->fl6_dst, 939 939 sk ? inet6_sk(sk)->srcprefs : 0, 940 940 &fl->fl6_src);
+2 -2
net/ipv6/ipv6_sockglue.c
··· 911 911 } else { 912 912 if (np->rxopt.bits.rxinfo) { 913 913 struct in6_pktinfo src_info; 914 - src_info.ipi6_ifindex = np->mcast_oif; 914 + src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if; 915 915 ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr); 916 916 put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); 917 917 } ··· 921 921 } 922 922 if (np->rxopt.bits.rxoinfo) { 923 923 struct in6_pktinfo src_info; 924 - src_info.ipi6_ifindex = np->mcast_oif; 924 + src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if; 925 925 ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr); 926 926 put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); 927 927 }
+1 -1
net/ipv6/ndisc.c
··· 549 549 override = 0; 550 550 in6_ifa_put(ifp); 551 551 } else { 552 - if (ipv6_dev_get_saddr(dev, daddr, 552 + if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, 553 553 inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, 554 554 &tmpaddr)) 555 555 return;
+7 -5
net/ipv6/route.c
··· 2106 2106 + nla_total_size(sizeof(struct rta_cacheinfo)); 2107 2107 } 2108 2108 2109 - static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, 2109 + static int rt6_fill_node(struct net *net, 2110 + struct sk_buff *skb, struct rt6_info *rt, 2110 2111 struct in6_addr *dst, struct in6_addr *src, 2111 2112 int iif, int type, u32 pid, u32 seq, 2112 2113 int prefix, int nowait, unsigned int flags) ··· 2190 2189 } else if (dst) { 2191 2190 struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); 2192 2191 struct in6_addr saddr_buf; 2193 - if (ipv6_dev_get_saddr(idev ? idev->dev : NULL, 2192 + if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, 2194 2193 dst, 0, &saddr_buf) == 0) 2195 2194 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2196 2195 } ··· 2235 2234 } else 2236 2235 prefix = 0; 2237 2236 2238 - return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, 2237 + return rt6_fill_node(arg->net, 2238 + arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, 2239 2239 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, 2240 2240 prefix, 0, NLM_F_MULTI); 2241 2241 } ··· 2302 2300 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); 2303 2301 skb->dst = &rt->u.dst; 2304 2302 2305 - err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, 2303 + err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, 2306 2304 RTM_NEWROUTE, NETLINK_CB(in_skb).pid, 2307 2305 nlh->nlmsg_seq, 0, 0, 0); 2308 2306 if (err < 0) { ··· 2329 2327 if (skb == NULL) 2330 2328 goto errout; 2331 2329 2332 - err = rt6_fill_node(skb, rt, NULL, NULL, 0, 2330 + err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, 2333 2331 event, info->pid, seq, 0, 0, 0); 2334 2332 if (err < 0) { 2335 2333 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
+3 -1
net/ipv6/xfrm6_policy.c
··· 52 52 static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr) 53 53 { 54 54 struct dst_entry *dst; 55 + struct net_device *dev; 55 56 56 57 dst = xfrm6_dst_lookup(0, NULL, daddr); 57 58 if (IS_ERR(dst)) 58 59 return -EHOSTUNREACH; 59 60 60 - ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev, 61 + dev = ip6_dst_idev(dst)->dev; 62 + ipv6_dev_get_saddr(dev_net(dev), dev, 61 63 (struct in6_addr *)&daddr->a6, 0, 62 64 (struct in6_addr *)&saddr->a6); 63 65 dst_release(dst);
+2
net/mac80211/mlme.c
··· 2103 2103 rcu_read_unlock(); 2104 2104 return; 2105 2105 } 2106 + /* update new sta with its last rx activity */ 2107 + sta->last_rx = jiffies; 2106 2108 } 2107 2109 2108 2110 /*
+25 -21
net/netfilter/nf_conntrack_netlink.c
··· 968 968 /* need to zero data of old helper */ 969 969 memset(&help->help, 0, sizeof(help->help)); 970 970 } else { 971 - help = nf_ct_helper_ext_add(ct, GFP_KERNEL); 971 + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 972 972 if (help == NULL) 973 973 return -ENOMEM; 974 974 } ··· 1136 1136 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; 1137 1137 ct->status |= IPS_CONFIRMED; 1138 1138 1139 - if (cda[CTA_STATUS]) { 1140 - err = ctnetlink_change_status(ct, cda); 1141 - if (err < 0) 1142 - goto err; 1143 - } 1144 - 1145 - if (cda[CTA_PROTOINFO]) { 1146 - err = ctnetlink_change_protoinfo(ct, cda); 1147 - if (err < 0) 1148 - goto err; 1149 - } 1150 - 1151 - nf_ct_acct_ext_add(ct, GFP_KERNEL); 1152 - 1153 - #if defined(CONFIG_NF_CONNTRACK_MARK) 1154 - if (cda[CTA_MARK]) 1155 - ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1156 - #endif 1157 - 1158 1139 rcu_read_lock(); 1159 1140 helper = __nf_ct_helper_find(rtuple); 1160 1141 if (helper) { 1161 - help = nf_ct_helper_ext_add(ct, GFP_KERNEL); 1142 + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1162 1143 if (help == NULL) { 1163 1144 rcu_read_unlock(); 1164 1145 err = -ENOMEM; ··· 1148 1167 /* not in hash table yet so not strictly necessary */ 1149 1168 rcu_assign_pointer(help->helper, helper); 1150 1169 } 1170 + 1171 + if (cda[CTA_STATUS]) { 1172 + err = ctnetlink_change_status(ct, cda); 1173 + if (err < 0) { 1174 + rcu_read_unlock(); 1175 + goto err; 1176 + } 1177 + } 1178 + 1179 + if (cda[CTA_PROTOINFO]) { 1180 + err = ctnetlink_change_protoinfo(ct, cda); 1181 + if (err < 0) { 1182 + rcu_read_unlock(); 1183 + goto err; 1184 + } 1185 + } 1186 + 1187 + nf_ct_acct_ext_add(ct, GFP_KERNEL); 1188 + 1189 + #if defined(CONFIG_NF_CONNTRACK_MARK) 1190 + if (cda[CTA_MARK]) 1191 + ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1192 + #endif 1151 1193 1152 1194 /* setup master conntrack: this is a confirmed expectation */ 1153 1195 if (master_ct) {
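The GFP_KERNEL to GFP_ATOMIC switches above follow from the surrounding rcu_read_lock(): code inside a read-side critical section must not sleep, so any allocation made there has to be atomic. A small illustrative sketch (the structure and helper are hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_ext {
	int value;
};

static struct example_ext *example_alloc_under_rcu(void)
{
	struct example_ext *ext;

	rcu_read_lock();
	/* GFP_KERNEL here could sleep and trip "scheduling while atomic". */
	ext = kzalloc(sizeof(*ext), GFP_ATOMIC);
	rcu_read_unlock();

	return ext;
}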
+10 -4
net/rfkill/rfkill.c
··· 150 150 * calls and handling all the red tape such as issuing notifications 151 151 * if the call is successful. 152 152 * 153 + * Suspended devices are not touched at all, and -EAGAIN is returned. 154 + * 153 155 * Note that the @force parameter cannot override a (possibly cached) 154 156 * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of 155 157 * RFKILL_STATE_HARD_BLOCKED implements either get_state() or ··· 169 167 { 170 168 int retval = 0; 171 169 enum rfkill_state oldstate, newstate; 170 + 171 + if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) 172 + return -EBUSY; 172 173 173 174 oldstate = rfkill->state; 174 175 ··· 219 214 * 220 215 * This function toggles the state of all switches of given type, 221 216 * unless a specific switch is claimed by userspace (in which case, 222 - * that switch is left alone). 217 + * that switch is left alone) or suspended. 223 218 */ 224 219 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) 225 220 { ··· 244 239 /** 245 240 * rfkill_epo - emergency power off all transmitters 246 241 * 247 - * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring 248 - * everything in its path but rfkill_mutex and rfkill->mutex. 242 + * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, 243 + * ignoring everything in its path but rfkill_mutex and rfkill->mutex. 249 244 */ 250 245 void rfkill_epo(void) 251 246 { ··· 463 458 if (dev->power.power_state.event != PM_EVENT_ON) { 464 459 mutex_lock(&rfkill->mutex); 465 460 461 + dev->power.power_state.event = PM_EVENT_ON; 462 + 466 463 /* restore radio state AND notify everybody */ 467 464 rfkill_toggle_radio(rfkill, rfkill->state, 1); 468 465 469 466 mutex_unlock(&rfkill->mutex); 470 467 } 471 468 472 - dev->power.power_state = PMSG_ON; 473 469 return 0; 474 470 } 475 471 #else
+1 -1
net/sched/cls_api.c
··· 280 280 if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { 281 281 spin_lock_bh(root_lock); 282 282 *back = tp->next; 283 - spin_lock_bh(root_lock); 283 + spin_unlock_bh(root_lock); 284 284 285 285 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); 286 286 tcf_destroy(tp);
+65 -30
net/sched/sch_api.c
··· 27 27 #include <linux/kmod.h> 28 28 #include <linux/list.h> 29 29 #include <linux/hrtimer.h> 30 + #include <linux/lockdep.h> 30 31 31 32 #include <net/net_namespace.h> 32 33 #include <net/sock.h> ··· 199 198 return NULL; 200 199 } 201 200 201 + /* 202 + * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen() 203 + * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue() 204 + */ 205 + static DEFINE_SPINLOCK(qdisc_list_lock); 206 + 207 + static void qdisc_list_add(struct Qdisc *q) 208 + { 209 + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { 210 + spin_lock_bh(&qdisc_list_lock); 211 + list_add_tail(&q->list, &qdisc_root_sleeping(q)->list); 212 + spin_unlock_bh(&qdisc_list_lock); 213 + } 214 + } 215 + 216 + void qdisc_list_del(struct Qdisc *q) 217 + { 218 + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { 219 + spin_lock_bh(&qdisc_list_lock); 220 + list_del(&q->list); 221 + spin_unlock_bh(&qdisc_list_lock); 222 + } 223 + } 224 + EXPORT_SYMBOL(qdisc_list_del); 225 + 202 226 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) 203 227 { 204 228 unsigned int i; 229 + struct Qdisc *q; 230 + 231 + spin_lock_bh(&qdisc_list_lock); 205 232 206 233 for (i = 0; i < dev->num_tx_queues; i++) { 207 234 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 208 - struct Qdisc *q, *txq_root = txq->qdisc_sleeping; 235 + struct Qdisc *txq_root = txq->qdisc_sleeping; 209 236 210 237 q = qdisc_match_from_root(txq_root, handle); 211 238 if (q) 212 - return q; 239 + goto unlock; 213 240 } 214 - return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); 241 + 242 + q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); 243 + 244 + unlock: 245 + spin_unlock_bh(&qdisc_list_lock); 246 + 247 + return q; 215 248 } 216 249 217 250 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) ··· 366 331 if (!s || tsize != s->tsize || (!tab && tsize > 0)) 367 332 return ERR_PTR(-EINVAL); 368 333 369 - spin_lock_bh(&qdisc_stab_lock); 334 + spin_lock(&qdisc_stab_lock); 370 335 371 336 list_for_each_entry(stab, &qdisc_stab_list, list) { 372 337 if (memcmp(&stab->szopts, s, sizeof(*s))) ··· 374 339 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) 375 340 continue; 376 341 stab->refcnt++; 377 - spin_unlock_bh(&qdisc_stab_lock); 342 + spin_unlock(&qdisc_stab_lock); 378 343 return stab; 379 344 } 380 345 381 - spin_unlock_bh(&qdisc_stab_lock); 346 + spin_unlock(&qdisc_stab_lock); 382 347 383 348 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); 384 349 if (!stab) ··· 389 354 if (tsize > 0) 390 355 memcpy(stab->data, tab, tsize * sizeof(u16)); 391 356 392 - spin_lock_bh(&qdisc_stab_lock); 357 + spin_lock(&qdisc_stab_lock); 393 358 list_add_tail(&stab->list, &qdisc_stab_list); 394 - spin_unlock_bh(&qdisc_stab_lock); 359 + spin_unlock(&qdisc_stab_lock); 395 360 396 361 return stab; 397 362 } ··· 401 366 if (!tab) 402 367 return; 403 368 404 - spin_lock_bh(&qdisc_stab_lock); 369 + spin_lock(&qdisc_stab_lock); 405 370 406 371 if (--tab->refcnt == 0) { 407 372 list_del(&tab->list); 408 373 kfree(tab); 409 374 } 410 375 411 - spin_unlock_bh(&qdisc_stab_lock); 376 + spin_unlock(&qdisc_stab_lock); 412 377 } 413 378 EXPORT_SYMBOL(qdisc_put_stab); 414 379 ··· 461 426 462 427 wd->qdisc->flags &= ~TCQ_F_THROTTLED; 463 428 smp_wmb(); 464 - __netif_schedule(wd->qdisc); 429 + __netif_schedule(qdisc_root(wd->qdisc)); 465 430 466 431 return HRTIMER_NORESTART; 467 432 } ··· 477 442 void 
qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) 478 443 { 479 444 ktime_t time; 445 + 446 + if (test_bit(__QDISC_STATE_DEACTIVATED, 447 + &qdisc_root_sleeping(wd->qdisc)->state)) 448 + return; 480 449 481 450 wd->qdisc->flags |= TCQ_F_THROTTLED; 482 451 time = ktime_set(0, 0); ··· 676 637 if (new || old) 677 638 qdisc_notify(skb, n, clid, old, new); 678 639 679 - if (old) { 680 - spin_lock_bh(&old->q.lock); 640 + if (old) 681 641 qdisc_destroy(old); 682 - spin_unlock_bh(&old->q.lock); 683 - } 684 642 } 685 643 686 644 /* Graft qdisc "new" to class "classid" of qdisc "parent" or ··· 743 707 return err; 744 708 } 745 709 710 + /* lockdep annotation is needed for ingress; egress gets it only for name */ 711 + static struct lock_class_key qdisc_tx_lock; 712 + static struct lock_class_key qdisc_rx_lock; 713 + 746 714 /* 747 715 Allocate and initialize new qdisc. 748 716 ··· 807 767 if (handle == TC_H_INGRESS) { 808 768 sch->flags |= TCQ_F_INGRESS; 809 769 handle = TC_H_MAKE(TC_H_INGRESS, 0); 770 + lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); 810 771 } else { 811 772 if (handle == 0) { 812 773 handle = qdisc_alloc_handle(dev); ··· 815 774 if (handle == 0) 816 775 goto err_out3; 817 776 } 777 + lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); 818 778 } 819 779 820 780 sch->handle = handle; ··· 844 802 goto err_out3; 845 803 } 846 804 } 847 - if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS)) 848 - list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list); 805 + 806 + qdisc_list_add(sch); 849 807 850 808 return sch; 851 809 } ··· 1126 1084 } 1127 1085 1128 1086 graft: 1129 - if (1) { 1130 - spinlock_t *root_lock; 1131 - 1132 - err = qdisc_graft(dev, p, skb, n, clid, q, NULL); 1133 - if (err) { 1134 - if (q) { 1135 - root_lock = qdisc_root_lock(q); 1136 - spin_lock_bh(root_lock); 1137 - qdisc_destroy(q); 1138 - spin_unlock_bh(root_lock); 1139 - } 1140 - return err; 1141 - } 1087 + err = qdisc_graft(dev, p, skb, n, clid, q, NULL); 1088 + if (err) { 1089 + if (q) 1090 + qdisc_destroy(q); 1091 + return err; 1142 1092 } 1093 + 1143 1094 return 0; 1144 1095 } 1145 1096
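The sch_api.c changes above also add two lock_class_key objects (the comment notes the ingress path needs the annotation, the egress one mainly gets a distinct name) so the qdisc locks fall into separate lockdep classes and their intentional nesting does not trigger false reports. A hedged sketch of that annotation technique on plain spinlocks, with all names hypothetical:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key example_outer_key;
static struct lock_class_key example_inner_key;

static void example_locks_init(spinlock_t *outer, spinlock_t *inner)
{
	spin_lock_init(outer);
	spin_lock_init(inner);

	/* Distinct classes: taking inner while outer is held no longer looks
	 * like recursive locking of a single class to lockdep. */
	lockdep_set_class(outer, &example_outer_key);
	lockdep_set_class(inner, &example_inner_key);
}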
+5 -1
net/sched/sch_cbq.c
··· 521 521 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); 522 522 psched_tdiff_t delay = cl->undertime - q->now; 523 523 524 + if (test_bit(__QDISC_STATE_DEACTIVATED, 525 + &qdisc_root_sleeping(cl->qdisc)->state)) 526 + return; 527 + 524 528 if (!cl->delayed) { 525 529 psched_time_t sched = q->now; 526 530 ktime_t expires; ··· 658 654 } 659 655 660 656 sch->flags &= ~TCQ_F_THROTTLED; 661 - __netif_schedule(sch); 657 + __netif_schedule(qdisc_root(sch)); 662 658 return HRTIMER_NORESTART; 663 659 } 664 660
+18 -49
net/sched/sch_generic.c
··· 518 518 } 519 519 EXPORT_SYMBOL(qdisc_reset); 520 520 521 - /* this is the rcu callback function to clean up a qdisc when there 522 - * are no further references to it */ 523 - 524 - static void __qdisc_destroy(struct rcu_head *head) 521 + void qdisc_destroy(struct Qdisc *qdisc) 525 522 { 526 - struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); 527 523 const struct Qdisc_ops *ops = qdisc->ops; 528 524 525 + if (qdisc->flags & TCQ_F_BUILTIN || 526 + !atomic_dec_and_test(&qdisc->refcnt)) 527 + return; 528 + 529 529 #ifdef CONFIG_NET_SCHED 530 + qdisc_list_del(qdisc); 531 + 530 532 qdisc_put_stab(qdisc->stab); 531 533 #endif 532 534 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); ··· 543 541 kfree_skb(qdisc->gso_skb); 544 542 545 543 kfree((char *) qdisc - qdisc->padded); 546 - } 547 - 548 - /* Under qdisc_lock(qdisc) and BH! */ 549 - 550 - void qdisc_destroy(struct Qdisc *qdisc) 551 - { 552 - if (qdisc->flags & TCQ_F_BUILTIN || 553 - !atomic_dec_and_test(&qdisc->refcnt)) 554 - return; 555 - 556 - if (qdisc->parent) 557 - list_del(&qdisc->list); 558 - 559 - call_rcu(&qdisc->q_rcu, __qdisc_destroy); 560 544 } 561 545 EXPORT_SYMBOL(qdisc_destroy); 562 546 ··· 584 596 { 585 597 struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; 586 598 int *need_watchdog_p = _need_watchdog; 599 + 600 + if (!(new_qdisc->flags & TCQ_F_BUILTIN)) 601 + clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); 587 602 588 603 rcu_assign_pointer(dev_queue->qdisc, new_qdisc); 589 604 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) ··· 631 640 if (qdisc) { 632 641 spin_lock_bh(qdisc_lock(qdisc)); 633 642 643 + if (!(qdisc->flags & TCQ_F_BUILTIN)) 644 + set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); 645 + 634 646 dev_queue->qdisc = qdisc_default; 635 647 qdisc_reset(qdisc); 636 648 ··· 641 647 } 642 648 } 643 649 644 - static bool some_qdisc_is_busy(struct net_device *dev, int lock) 650 + static bool some_qdisc_is_busy(struct net_device *dev) 645 651 { 646 652 unsigned int i; 647 653 ··· 655 661 q = dev_queue->qdisc_sleeping; 656 662 root_lock = qdisc_lock(q); 657 663 658 - if (lock) 659 - spin_lock_bh(root_lock); 664 + spin_lock_bh(root_lock); 660 665 661 666 val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || 662 667 test_bit(__QDISC_STATE_SCHED, &q->state)); 663 668 664 - if (lock) 665 - spin_unlock_bh(root_lock); 669 + spin_unlock_bh(root_lock); 666 670 667 671 if (val) 668 672 return true; ··· 670 678 671 679 void dev_deactivate(struct net_device *dev) 672 680 { 673 - bool running; 674 - 675 681 netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); 676 682 dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc); 677 683 ··· 679 689 synchronize_rcu(); 680 690 681 691 /* Wait for outstanding qdisc_run calls. */ 682 - do { 683 - while (some_qdisc_is_busy(dev, 0)) 684 - yield(); 685 - 686 - /* 687 - * Double-check inside queue lock to ensure that all effects 688 - * of the queue run are visible when we return. 689 - */ 690 - running = some_qdisc_is_busy(dev, 1); 691 - 692 - /* 693 - * The running flag should never be set at this point because 694 - * we've already set dev->qdisc to noop_qdisc *inside* the same 695 - * pair of spin locks. That is, if any qdisc_run starts after 696 - * our initial test it should see the noop_qdisc and then 697 - * clear the RUNNING bit before dropping the queue lock. So 698 - * if it is set here then we've found a bug. 
699 - */ 700 - } while (WARN_ON_ONCE(running)); 692 + while (some_qdisc_is_busy(dev)) 693 + yield(); 701 694 } 702 695 703 696 static void dev_init_scheduler_queue(struct net_device *dev, ··· 709 736 struct Qdisc *qdisc_default = _qdisc_default; 710 737 711 738 if (qdisc) { 712 - spinlock_t *root_lock = qdisc_lock(qdisc); 713 - 714 739 dev_queue->qdisc = qdisc_default; 715 740 dev_queue->qdisc_sleeping = qdisc_default; 716 741 717 - spin_lock_bh(root_lock); 718 742 qdisc_destroy(qdisc); 719 - spin_unlock_bh(root_lock); 720 743 } 721 744 } 722 745
+2 -2
net/sched/sch_htb.c
··· 577 577 sch->qstats.drops++; 578 578 cl->qstats.drops++; 579 579 } 580 - return NET_XMIT_DROP; 580 + return ret; 581 581 } else { 582 582 cl->bstats.packets += 583 583 skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; ··· 623 623 sch->qstats.drops++; 624 624 cl->qstats.drops++; 625 625 } 626 - return NET_XMIT_DROP; 626 + return ret; 627 627 } else 628 628 htb_activate(q, cl); 629 629
+2 -2
net/sched/sch_prio.c
··· 113 113 if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) { 114 114 sch->q.qlen++; 115 115 sch->qstats.requeues++; 116 - return 0; 116 + return NET_XMIT_SUCCESS; 117 117 } 118 118 if (net_xmit_drop_count(ret)) 119 119 sch->qstats.drops++; 120 - return NET_XMIT_DROP; 120 + return ret; 121 121 } 122 122 123 123
+2 -9
net/sched/sch_tbf.c
··· 123 123 struct tbf_sched_data *q = qdisc_priv(sch); 124 124 int ret; 125 125 126 - if (qdisc_pkt_len(skb) > q->max_size) { 127 - sch->qstats.drops++; 128 - #ifdef CONFIG_NET_CLS_ACT 129 - if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 130 - #endif 131 - kfree_skb(skb); 132 - 133 - return NET_XMIT_DROP; 134 - } 126 + if (qdisc_pkt_len(skb) > q->max_size) 127 + return qdisc_reshape_fail(skb, sch); 135 128 136 129 ret = qdisc_enqueue(skb, q->qdisc); 137 130 if (ret != 0) {
+2 -2
net/sctp/endpointola.c
··· 103 103 104 104 /* Initialize the CHUNKS parameter */ 105 105 auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS; 106 + auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t)); 106 107 107 108 /* If the Add-IP functionality is enabled, we must 108 109 * authenticate, ASCONF and ASCONF-ACK chunks ··· 111 110 if (sctp_addip_enable) { 112 111 auth_chunks->chunks[0] = SCTP_CID_ASCONF; 113 112 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; 114 - auth_chunks->param_hdr.length = 115 - htons(sizeof(sctp_paramhdr_t) + 2); 113 + auth_chunks->param_hdr.length += htons(2); 116 114 } 117 115 } 118 116
+2 -1
net/sctp/ipv6.c
··· 319 319 __func__, asoc, dst, NIP6(daddr->v6.sin6_addr)); 320 320 321 321 if (!asoc) { 322 - ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, 322 + ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)), 323 + dst ? ip6_dst_idev(dst)->dev : NULL, 323 324 &daddr->v6.sin6_addr, 324 325 inet6_sk(&sk->inet.sk)->srcprefs, 325 326 &saddr->v6.sin6_addr);
+65 -20
net/sctp/socket.c
··· 3055 3055 { 3056 3056 struct sctp_authchunk val; 3057 3057 3058 + if (!sctp_auth_enable) 3059 + return -EACCES; 3060 + 3058 3061 if (optlen != sizeof(struct sctp_authchunk)) 3059 3062 return -EINVAL; 3060 3063 if (copy_from_user(&val, optval, optlen)) ··· 3087 3084 { 3088 3085 struct sctp_hmacalgo *hmacs; 3089 3086 int err; 3087 + 3088 + if (!sctp_auth_enable) 3089 + return -EACCES; 3090 3090 3091 3091 if (optlen < sizeof(struct sctp_hmacalgo)) 3092 3092 return -EINVAL; ··· 3129 3123 struct sctp_association *asoc; 3130 3124 int ret; 3131 3125 3126 + if (!sctp_auth_enable) 3127 + return -EACCES; 3128 + 3132 3129 if (optlen <= sizeof(struct sctp_authkey)) 3133 3130 return -EINVAL; 3134 3131 ··· 3169 3160 struct sctp_authkeyid val; 3170 3161 struct sctp_association *asoc; 3171 3162 3163 + if (!sctp_auth_enable) 3164 + return -EACCES; 3165 + 3172 3166 if (optlen != sizeof(struct sctp_authkeyid)) 3173 3167 return -EINVAL; 3174 3168 if (copy_from_user(&val, optval, optlen)) ··· 3196 3184 { 3197 3185 struct sctp_authkeyid val; 3198 3186 struct sctp_association *asoc; 3187 + 3188 + if (!sctp_auth_enable) 3189 + return -EACCES; 3199 3190 3200 3191 if (optlen != sizeof(struct sctp_authkeyid)) 3201 3192 return -EINVAL; ··· 5212 5197 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5213 5198 char __user *optval, int __user *optlen) 5214 5199 { 5200 + struct sctp_hmacalgo __user *p = (void __user *)optval; 5215 5201 struct sctp_hmac_algo_param *hmacs; 5216 - __u16 param_len; 5202 + __u16 data_len = 0; 5203 + u32 num_idents; 5204 + 5205 + if (!sctp_auth_enable) 5206 + return -EACCES; 5217 5207 5218 5208 hmacs = sctp_sk(sk)->ep->auth_hmacs_list; 5219 - param_len = ntohs(hmacs->param_hdr.length); 5209 + data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5220 5210 5221 - if (len < param_len) 5211 + if (len < sizeof(struct sctp_hmacalgo) + data_len) 5222 5212 return -EINVAL; 5213 + 5214 + len = sizeof(struct sctp_hmacalgo) + data_len; 5215 + num_idents = data_len / sizeof(u16); 5216 + 5223 5217 if (put_user(len, optlen)) 5224 5218 return -EFAULT; 5225 - if (copy_to_user(optval, hmacs->hmac_ids, len)) 5219 + if (put_user(num_idents, &p->shmac_num_idents)) 5226 5220 return -EFAULT; 5227 - 5221 + if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5222 + return -EFAULT; 5228 5223 return 0; 5229 5224 } 5230 5225 ··· 5243 5218 { 5244 5219 struct sctp_authkeyid val; 5245 5220 struct sctp_association *asoc; 5221 + 5222 + if (!sctp_auth_enable) 5223 + return -EACCES; 5246 5224 5247 5225 if (len < sizeof(struct sctp_authkeyid)) 5248 5226 return -EINVAL; ··· 5261 5233 else 5262 5234 val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; 5263 5235 5236 + len = sizeof(struct sctp_authkeyid); 5237 + if (put_user(len, optlen)) 5238 + return -EFAULT; 5239 + if (copy_to_user(optval, &val, len)) 5240 + return -EFAULT; 5241 + 5264 5242 return 0; 5265 5243 } 5266 5244 ··· 5277 5243 struct sctp_authchunks val; 5278 5244 struct sctp_association *asoc; 5279 5245 struct sctp_chunks_param *ch; 5280 - u32 num_chunks; 5246 + u32 num_chunks = 0; 5281 5247 char __user *to; 5282 5248 5283 - if (len <= sizeof(struct sctp_authchunks)) 5249 + if (!sctp_auth_enable) 5250 + return -EACCES; 5251 + 5252 + if (len < sizeof(struct sctp_authchunks)) 5284 5253 return -EINVAL; 5285 5254 5286 - if (copy_from_user(&val, p, sizeof(struct sctp_authchunks))) 5255 + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5287 5256 return -EFAULT; 5288 5257 5289 5258 to = p->gauth_chunks; ··· 5295 
5258 return -EINVAL; 5296 5259 5297 5260 ch = asoc->peer.peer_chunks; 5261 + if (!ch) 5262 + goto num; 5298 5263 5299 5264 /* See if the user provided enough room for all the data */ 5300 5265 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5301 5266 if (len < num_chunks) 5302 5267 return -EINVAL; 5303 5268 5304 - len = num_chunks; 5305 - if (put_user(len, optlen)) 5269 + if (copy_to_user(to, ch->chunks, num_chunks)) 5306 5270 return -EFAULT; 5271 + num: 5272 + len = sizeof(struct sctp_authchunks) + num_chunks; 5273 + if (put_user(len, optlen)) return -EFAULT; 5307 5274 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5308 5275 return -EFAULT; 5309 - if (copy_to_user(to, ch->chunks, len)) 5310 - return -EFAULT; 5311 - 5312 5276 return 0; 5313 5277 } 5314 5278 ··· 5320 5282 struct sctp_authchunks val; 5321 5283 struct sctp_association *asoc; 5322 5284 struct sctp_chunks_param *ch; 5323 - u32 num_chunks; 5285 + u32 num_chunks = 0; 5324 5286 char __user *to; 5325 5287 5326 - if (len <= sizeof(struct sctp_authchunks)) 5288 + if (!sctp_auth_enable) 5289 + return -EACCES; 5290 + 5291 + if (len < sizeof(struct sctp_authchunks)) 5327 5292 return -EINVAL; 5328 5293 5329 - if (copy_from_user(&val, p, sizeof(struct sctp_authchunks))) 5294 + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5330 5295 return -EFAULT; 5331 5296 5332 5297 to = p->gauth_chunks; ··· 5342 5301 else 5343 5302 ch = sctp_sk(sk)->ep->auth_chunk_list; 5344 5303 5304 + if (!ch) 5305 + goto num; 5306 + 5345 5307 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5346 - if (len < num_chunks) 5308 + if (len < sizeof(struct sctp_authchunks) + num_chunks) 5347 5309 return -EINVAL; 5348 5310 5349 - len = num_chunks; 5311 + if (copy_to_user(to, ch->chunks, num_chunks)) 5312 + return -EFAULT; 5313 + num: 5314 + len = sizeof(struct sctp_authchunks) + num_chunks; 5350 5315 if (put_user(len, optlen)) 5351 5316 return -EFAULT; 5352 5317 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5353 - return -EFAULT; 5354 - if (copy_to_user(to, ch->chunks, len)) 5355 5318 return -EFAULT; 5356 5319 5357 5320 return 0;
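After the fixes above, SCTP_HMAC_IDENT returns a complete struct sctp_hmacalgo (identifier count plus array) and the SCTP-AUTH socket options report EACCES when net.sctp.auth_enable is off. A hedged userspace sketch of querying the HMAC identifiers, assuming lksctp-tools headers that expose struct sctp_hmacalgo with the shmac_num_idents/shmac_idents fields used above:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	unsigned char buf[sizeof(struct sctp_hmacalgo) + 16 * sizeof(uint16_t)];
	struct sctp_hmacalgo *algo = (struct sctp_hmacalgo *)buf;
	socklen_t len = sizeof(buf);
	uint32_t i;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0 ||
	    getsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, &len) < 0) {
		perror("SCTP_HMAC_IDENT");
		return 1;
	}

	for (i = 0; i < algo->shmac_num_idents; i++)
		printf("HMAC identifier %u\n",
		       (unsigned int)algo->shmac_idents[i]);

	return 0;
}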
+11 -5
scripts/mod/file2alias.c
··· 344 344 struct module *mod) 345 345 { 346 346 const unsigned long id_size = sizeof(struct pnp_device_id); 347 - const struct pnp_device_id *id = symval; 347 + const unsigned int count = (size / id_size)-1; 348 + const struct pnp_device_id *devs = symval; 349 + unsigned int i; 348 350 349 351 device_id_check(mod->name, "pnp", size, id_size, symval); 350 352 351 - buf_printf(&mod->dev_table_buf, 352 - "MODULE_ALIAS(\"pnp:d%s*\");\n", id->id); 353 - buf_printf(&mod->dev_table_buf, 354 - "MODULE_ALIAS(\"acpi*:%s:*\");\n", id->id); 353 + for (i = 0; i < count; i++) { 354 + const char *id = (char *)devs[i].id; 355 + 356 + buf_printf(&mod->dev_table_buf, 357 + "MODULE_ALIAS(\"pnp:d%s*\");\n", id); 358 + buf_printf(&mod->dev_table_buf, 359 + "MODULE_ALIAS(\"acpi*:%s:*\");\n", id); 360 + } 355 361 } 356 362 357 363 /* looks like: "pnp:dD" for every device of the card */
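The file2alias fix above matters for any driver whose pnp_device_id table carries more than one entry: previously only the first id produced MODULE_ALIAS() strings, so the module was not auto-loaded for the remaining ids. A hedged sketch of such a table (the ids and table name are made up):

#include <linux/module.h>
#include <linux/pnp.h>

/* Two real entries plus the terminator; the terminator is why the generator
 * above iterates over (size / id_size) - 1 entries. With the fix, both
 * "AAA0001" and "AAA0002" now yield pnp:d and acpi*: aliases. */
static const struct pnp_device_id example_pnp_ids[] = {
	{ .id = "AAA0001", .driver_data = 0 },
	{ .id = "AAA0002", .driver_data = 0 },
	{ }
};
MODULE_DEVICE_TABLE(pnp, example_pnp_ids);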
-1
sound/mips/au1x00.c
··· 38 38 #include <linux/interrupt.h> 39 39 #include <linux/init.h> 40 40 #include <linux/slab.h> 41 - #include <linux/version.h> 42 41 #include <sound/core.h> 43 42 #include <sound/initval.h> 44 43 #include <sound/pcm.h>
+9
sound/pci/hda/hda_intel.c
··· 278 278 /* Defines for Nvidia HDA support */ 279 279 #define NVIDIA_HDA_TRANSREG_ADDR 0x4e 280 280 #define NVIDIA_HDA_ENABLE_COHBITS 0x0f 281 + #define NVIDIA_HDA_ISTRM_COH 0x4d 282 + #define NVIDIA_HDA_OSTRM_COH 0x4c 283 + #define NVIDIA_HDA_ENABLE_COHBIT 0x01 281 284 282 285 /* Defines for Intel SCH HDA snoop control */ 283 286 #define INTEL_SCH_HDA_DEVC 0x78 ··· 903 900 update_pci_byte(chip->pci, 904 901 NVIDIA_HDA_TRANSREG_ADDR, 905 902 0x0f, NVIDIA_HDA_ENABLE_COHBITS); 903 + update_pci_byte(chip->pci, 904 + NVIDIA_HDA_ISTRM_COH, 905 + 0x01, NVIDIA_HDA_ENABLE_COHBIT); 906 + update_pci_byte(chip->pci, 907 + NVIDIA_HDA_OSTRM_COH, 908 + 0x01, NVIDIA_HDA_ENABLE_COHBIT); 906 909 break; 907 910 case AZX_DRIVER_SCH: 908 911 pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
+3 -3
sound/pci/hda/patch_realtek.c
··· 952 952 tmp | 0x2010); 953 953 break; 954 954 case 0x10ec0888: 955 - alc888_coef_init(codec); 955 + /*alc888_coef_init(codec);*/ /* called in alc_init() */ 956 956 break; 957 957 case 0x10ec0267: 958 958 case 0x10ec0268: ··· 2439 2439 unsigned int i; 2440 2440 2441 2441 alc_fix_pll(codec); 2442 + if (codec->vendor_id == 0x10ec0888) 2443 + alc888_coef_init(codec); 2442 2444 2443 2445 for (i = 0; i < spec->num_init_verbs; i++) 2444 2446 snd_hda_sequence_write(codec, spec->init_verbs[i]); ··· 8428 8426 codec->patch_ops = alc_patch_ops; 8429 8427 if (board_config == ALC883_AUTO) 8430 8428 spec->init_hook = alc883_auto_init; 8431 - else if (codec->vendor_id == 0x10ec0888) 8432 - spec->init_hook = alc888_coef_init; 8433 8429 8434 8430 #ifdef CONFIG_SND_HDA_POWER_SAVE 8435 8431 if (!spec->loopback.amplist)
-1
sound/soc/at91/eti_b1_wm8731.c
··· 22 22 23 23 #include <linux/module.h> 24 24 #include <linux/moduleparam.h> 25 - #include <linux/version.h> 26 25 #include <linux/kernel.h> 27 26 #include <linux/clk.h> 28 27 #include <linux/timer.h>
-1
sound/soc/codecs/wm8753.c
··· 34 34 35 35 #include <linux/module.h> 36 36 #include <linux/moduleparam.h> 37 - #include <linux/version.h> 38 37 #include <linux/kernel.h> 39 38 #include <linux/init.h> 40 39 #include <linux/delay.h>
-1
sound/soc/codecs/wm9712.c
··· 13 13 14 14 #include <linux/init.h> 15 15 #include <linux/module.h> 16 - #include <linux/version.h> 17 16 #include <linux/kernel.h> 18 17 #include <linux/device.h> 19 18 #include <sound/core.h>