nixosTests.ceph-single-node-bluestore: handleTest -> runTest

Sizhe Zhao · 644443d5 · 9206b0ad

2 files changed, +205 -211
nixos/tests/all-tests.nix (+2 -2)
```diff
@@ -297,10 +297,10 @@
   centrifugo = runTest ./centrifugo.nix;
   ceph-multi-node = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix;
   ceph-single-node = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node.nix;
-  ceph-single-node-bluestore = handleTestOn [
+  ceph-single-node-bluestore = runTestOn [
     "aarch64-linux"
     "x86_64-linux"
-  ] ./ceph-single-node-bluestore.nix { };
+  ] ./ceph-single-node-bluestore.nix;
   ceph-single-node-bluestore-dmcrypt = handleTestOn [
     "aarch64-linux"
     "x86_64-linux"
```
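This is the ongoing `handleTest` → `runTest` migration in nixpkgs: `handleTestOn` treats the test file as a function and applies it to an argument set (the trailing `{ }` above), whereas `runTestOn` evaluates the file directly as a NixOS test module, so the application disappears. A minimal sketch of the two registration styles, using a hypothetical `./example.nix` (the real entries are in the diff above):

```nix
{
  # Old style: the test file evaluates to a function; handleTestOn applies it
  # to an argument set, hence the trailing { }.
  example-old = handleTestOn [ "x86_64-linux" ] ./example.nix { };

  # New style: the test file is a NixOS test module that runTestOn evaluates
  # directly, so the trailing { } goes away.
  example-new = runTestOn [ "x86_64-linux" ] ./example.nix;
}
```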
nixos/tests/ceph-single-node-bluestore.nix (+203 -209)
```diff
@@ -1,236 +1,230 @@
-import ./make-test-python.nix (
-  { pkgs, lib, ... }:
+{ lib, ... }:

-  let
-    cfg = {
-      clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
-      monA = {
-        name = "a";
-        ip = "192.168.1.1";
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+    osd2 = {
+      name = "2";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
+  };
+  generateCephConfig =
+    { daemonConfig }:
+    {
+      enable = true;
+      global = {
+        fsid = cfg.clusterId;
+        monHost = cfg.monA.ip;
+        monInitialMembers = cfg.monA.name;
       };
-      osd0 = {
-        name = "0";
-        key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
-        uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
-      };
-      osd1 = {
-        name = "1";
-        key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
-        uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
-      };
-      osd2 = {
-        name = "2";
-        key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
-        uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    }
+    // daemonConfig;
+
+  generateHost =
+    {
+      cephConfig,
+      networkConfig,
+    }:
+    { pkgs, ... }:
+    {
+      virtualisation = {
+        emptyDiskImages = [
+          20480
+          20480
+          20480
+        ];
+        vlans = [ 1 ];
       };
-    };
-    generateCephConfig =
-      { daemonConfig }:
-      {
-        enable = true;
-        global = {
-          fsid = cfg.clusterId;
-          monHost = cfg.monA.ip;
-          monInitialMembers = cfg.monA.name;
-        };
-      }
-      // daemonConfig;

-    generateHost =
-      {
-        pkgs,
-        cephConfig,
-        networkConfig,
-        ...
-      }:
-      {
-        virtualisation = {
-          emptyDiskImages = [
-            20480
-            20480
-            20480
-          ];
-          vlans = [ 1 ];
-        };
+      networking = networkConfig;

-        networking = networkConfig;
+      environment.systemPackages = with pkgs; [
+        bash
+        sudo
+        ceph
+        xfsprogs
+      ];

-        environment.systemPackages = with pkgs; [
-          bash
-          sudo
-          ceph
-          xfsprogs
-        ];
+      boot.kernelModules = [ "xfs" ];

-        boot.kernelModules = [ "xfs" ];
+      services.ceph = cephConfig;
+    };

-        services.ceph = cephConfig;
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [
+      {
+        address = cfg.monA.ip;
+        prefixLength = 24;
+      }
+    ];
+  };
+  cephConfigMonA = generateCephConfig {
+    daemonConfig = {
+      mon = {
+        enable = true;
+        daemons = [ cfg.monA.name ];
       };
-
-    networkMonA = {
-      dhcpcd.enable = false;
-      interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-        {
-          address = cfg.monA.ip;
-          prefixLength = 24;
-        }
-      ];
-    };
-    cephConfigMonA = generateCephConfig {
-      daemonConfig = {
-        mon = {
-          enable = true;
-          daemons = [ cfg.monA.name ];
-        };
-        mgr = {
-          enable = true;
-          daemons = [ cfg.monA.name ];
-        };
-        osd = {
-          enable = true;
-          daemons = [
-            cfg.osd0.name
-            cfg.osd1.name
-            cfg.osd2.name
-          ];
-        };
+      mgr = {
+        enable = true;
+        daemons = [ cfg.monA.name ];
+      };
+      osd = {
+        enable = true;
+        daemons = [
+          cfg.osd0.name
+          cfg.osd1.name
+          cfg.osd2.name
+        ];
       };
     };
+  };

-    # Following deployment is based on the manual deployment described here:
-    # https://docs.ceph.com/docs/master/install/manual-deployment/
-    # For other ways to deploy a ceph cluster, look at the documentation at
-    # https://docs.ceph.com/docs/master/
-    testscript =
-      { ... }:
-      ''
-        start_all()
+  # Following deployment is based on the manual deployment described here:
+  # https://docs.ceph.com/docs/master/install/manual-deployment/
+  # For other ways to deploy a ceph cluster, look at the documentation at
+  # https://docs.ceph.com/docs/master/
+  testScript = ''
+    start_all()

-        monA.wait_for_unit("network.target")
+    monA.wait_for_unit("network.target")

-        # Bootstrap ceph-mon daemon
-        monA.succeed(
-            "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-            "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-            "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-            "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
-            "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
-            "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
-            "systemctl start ceph-mon-${cfg.monA.name}",
-        )
-        monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-        monA.succeed("ceph mon enable-msgr2")
-        monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+    # Bootstrap ceph-mon daemon
+    monA.succeed(
+        "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+        "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+        "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+        "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+        "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+        "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+        "systemctl start ceph-mon-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.succeed("ceph mon enable-msgr2")
+    monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")

-        # Can't check ceph status until a mon is up
-        monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    # Can't check ceph status until a mon is up
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")

-        # Start the ceph-mgr daemon, after copying in the keyring
-        monA.succeed(
-            "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
-            "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
-            "systemctl start ceph-mgr-${cfg.monA.name}",
-        )
-        monA.wait_for_unit("ceph-mgr-a")
-        monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-        monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    # Start the ceph-mgr daemon, after copying in the keyring
+    monA.succeed(
+        "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+        "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+        "systemctl start ceph-mgr-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mgr-a")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")

-        # Bootstrap OSDs
-        monA.succeed(
-            "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
-            "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd0.name}/type",
-            "ln -sf /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}/block",
-            "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
-            "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd1.name}/type",
-            "ln -sf /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}/block",
-            "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
-            "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd2.name}/type",
-            "ln -sf /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}/block",
-            "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
-            "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
-            "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
-            'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
-            'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
-            'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
-        )
+    # Bootstrap OSDs
+    monA.succeed(
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd0.name}/type",
+        "ln -sf /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}/block",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd1.name}/type",
+        "ln -sf /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}/block",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd2.name}/type",
+        "ln -sf /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}/block",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )

-        # Initialize the OSDs with regular filestore
-        monA.succeed(
-            "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
-            "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
-            "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
-            "chown -R ceph:ceph /var/lib/ceph/osd",
-            "systemctl start ceph-osd-${cfg.osd0.name}",
-            "systemctl start ceph-osd-${cfg.osd1.name}",
-            "systemctl start ceph-osd-${cfg.osd2.name}",
-        )
-        monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-        monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-        monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    # Initialize the OSDs with regular filestore
+    monA.succeed(
+        "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+        "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd0.name}",
+        "systemctl start ceph-osd-${cfg.osd1.name}",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")

-        monA.succeed(
-            "ceph osd pool create single-node-test 32 32",
-            "ceph osd pool ls | grep 'single-node-test'",
+    monA.succeed(
+        "ceph osd pool create single-node-test 32 32",
+        "ceph osd pool ls | grep 'single-node-test'",

-            # We need to enable an application on the pool, otherwise it will
-            # stay unhealthy in state POOL_APP_NOT_ENABLED.
-            # Creating a CephFS would do this automatically, but we haven't done that here.
-            # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
-            # We use the custom application name "nixos-test" for this.
-            "ceph osd pool application enable single-node-test nixos-test",
+        # We need to enable an application on the pool, otherwise it will
+        # stay unhealthy in state POOL_APP_NOT_ENABLED.
+        # Creating a CephFS would do this automatically, but we haven't done that here.
+        # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+        # We use the custom application name "nixos-test" for this.
+        "ceph osd pool application enable single-node-test nixos-test",

-            "ceph osd pool rename single-node-test single-node-other-test",
-            "ceph osd pool ls | grep 'single-node-other-test'",
-        )
-        monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
-        monA.succeed(
-            "ceph osd getcrushmap -o crush",
-            "crushtool -d crush -o decrushed",
-            "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
-            "crushtool -c modcrush -o recrushed",
-            "ceph osd setcrushmap -i recrushed",
-            "ceph osd pool set single-node-other-test size 2",
-        )
-        monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-        monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
-        monA.fail(
-            "ceph osd pool ls | grep 'multi-node-test'",
-            "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
-        )
+        "ceph osd pool rename single-node-test single-node-other-test",
+        "ceph osd pool ls | grep 'single-node-other-test'",
+    )
+    monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+    monA.succeed(
+        "ceph osd getcrushmap -o crush",
+        "crushtool -d crush -o decrushed",
+        "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+        "crushtool -c modcrush -o recrushed",
+        "ceph osd setcrushmap -i recrushed",
+        "ceph osd pool set single-node-other-test size 2",
+    )
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+    monA.fail(
+        "ceph osd pool ls | grep 'multi-node-test'",
+        "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+    )

-        # Shut down ceph by stopping ceph.target.
-        monA.succeed("systemctl stop ceph.target")
+    # Shut down ceph by stopping ceph.target.
+    monA.succeed("systemctl stop ceph.target")

-        # Start it up
-        monA.succeed("systemctl start ceph.target")
-        monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-        monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
-        monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
-        monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
-        monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
+    # Start it up
+    monA.succeed("systemctl start ceph.target")
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")

-        # Ensure the cluster comes back up again
-        monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-        monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-        monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-        monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-        monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-      '';
-  in
-  {
-    name = "basic-single-node-ceph-cluster-bluestore";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ lukegb ];
-    };
+    # Ensure the cluster comes back up again
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+  '';
+in
+{
+  name = "basic-single-node-ceph-cluster-bluestore";
+  meta = with lib.maintainers; {
+    maintainers = [ lukegb ];
+  };

-    nodes = {
-      monA = generateHost {
-        pkgs = pkgs;
-        cephConfig = cephConfigMonA;
-        networkConfig = networkMonA;
-      };
+  nodes = {
+    monA = generateHost {
+      cephConfig = cephConfigMonA;
+      networkConfig = networkMonA;
     };
+  };

-    testScript = testscript;
-  }
-)
+  inherit testScript;
+}
```
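The bulk of this second diff is mechanical re-indentation from dropping the `import ./make-test-python.nix ( ... )` wrapper, but a few changes are worth noting: the file now starts as a plain test module (`{ lib, ... }:`), `testscript` changes from a function (`{ ... }: ''...''`) to a plain `testScript` string exposed via `inherit testScript;`, `pkgs.lib` references become `lib`, and `generateHost` no longer takes `pkgs` explicitly, so the `pkgs = pkgs;` argument at the call site disappears. Instead, the helper returns a NixOS module and lets the module system supply `pkgs`. A trimmed sketch of that last pattern, assuming the surrounding `let` bindings from the diff:

```nix
# Trimmed from the diff above: generateHost takes only its own arguments and
# returns a module function; the NixOS module system injects pkgs per node.
generateHost =
  {
    cephConfig,
    networkConfig,
  }:
  { pkgs, ... }:
  {
    networking = networkConfig;
    environment.systemPackages = with pkgs; [ ceph ];
    services.ceph = cephConfig;
  };
```

Having the module system inject `pkgs` is what makes the helper work under `runTest`, where there is no outer function argument set through which a `pkgs` could be threaded by hand.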