Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drbd: Backport the "status" command

The status command originates from the drbd9 code base. While for now we
keep the status information in /proc/drbd available, this commit
allows the user base to gracefully migrate their monitoring
infrastructure to the new status reporting interface.

In drbd9 no status information is exposed through /proc/drbd.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

authored by

Andreas Gruenbacher and committed by
Jens Axboe
a55bbd37 a2972846

+536 -79
+487 -79
drivers/block/drbd/drbd_nl.c
··· 76 76 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info); 77 77 /* .dumpit */ 78 78 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb); 79 + int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb); 80 + int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb); 81 + int drbd_adm_dump_devices_done(struct netlink_callback *cb); 82 + int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb); 83 + int drbd_adm_dump_connections_done(struct netlink_callback *cb); 84 + int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb); 85 + int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb); 79 86 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb); 80 87 81 88 #include <linux/drbd_genl_api.h> ··· 2972 2965 } 2973 2966 2974 2967 /* 2968 + * The generic netlink dump callbacks are called outside the genl_lock(), so 2969 + * they cannot use the simple attribute parsing code which uses global 2970 + * attribute tables. 
2971 + */ 2972 + static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr) 2973 + { 2974 + const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ; 2975 + const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1; 2976 + struct nlattr *nla; 2977 + 2978 + nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), 2979 + DRBD_NLA_CFG_CONTEXT); 2980 + if (!nla) 2981 + return NULL; 2982 + return drbd_nla_find_nested(maxtype, nla, __nla_type(attr)); 2983 + } 2984 + 2985 + static void resource_to_info(struct resource_info *, struct drbd_resource *); 2986 + 2987 + int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb) 2988 + { 2989 + struct drbd_genlmsghdr *dh; 2990 + struct drbd_resource *resource; 2991 + struct resource_info resource_info; 2992 + struct resource_statistics resource_statistics; 2993 + int err; 2994 + 2995 + rcu_read_lock(); 2996 + if (cb->args[0]) { 2997 + for_each_resource_rcu(resource, &drbd_resources) 2998 + if (resource == (struct drbd_resource *)cb->args[0]) 2999 + goto found_resource; 3000 + err = 0; /* resource was probably deleted */ 3001 + goto out; 3002 + } 3003 + resource = list_entry(&drbd_resources, 3004 + struct drbd_resource, resources); 3005 + 3006 + found_resource: 3007 + list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) { 3008 + goto put_result; 3009 + } 3010 + err = 0; 3011 + goto out; 3012 + 3013 + put_result: 3014 + dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3015 + cb->nlh->nlmsg_seq, &drbd_genl_family, 3016 + NLM_F_MULTI, DRBD_ADM_GET_RESOURCES); 3017 + err = -ENOMEM; 3018 + if (!dh) 3019 + goto out; 3020 + dh->minor = -1U; 3021 + dh->ret_code = NO_ERROR; 3022 + err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL); 3023 + if (err) 3024 + goto out; 3025 + err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN)); 3026 + if (err) 3027 + goto out; 3028 + resource_to_info(&resource_info, resource); 3029 + err = 
resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN)); 3030 + if (err) 3031 + goto out; 3032 + resource_statistics.res_stat_write_ordering = resource->write_ordering; 3033 + err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN)); 3034 + if (err) 3035 + goto out; 3036 + cb->args[0] = (long)resource; 3037 + genlmsg_end(skb, dh); 3038 + err = 0; 3039 + 3040 + out: 3041 + rcu_read_unlock(); 3042 + if (err) 3043 + return err; 3044 + return skb->len; 3045 + } 3046 + 3047 + static void device_to_statistics(struct device_statistics *s, 3048 + struct drbd_device *device) 3049 + { 3050 + memset(s, 0, sizeof(*s)); 3051 + s->dev_upper_blocked = !may_inc_ap_bio(device); 3052 + if (get_ldev(device)) { 3053 + struct drbd_md *md = &device->ldev->md; 3054 + u64 *history_uuids = (u64 *)s->history_uuids; 3055 + struct request_queue *q; 3056 + int n; 3057 + 3058 + spin_lock_irq(&md->uuid_lock); 3059 + s->dev_current_uuid = md->uuid[UI_CURRENT]; 3060 + BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1); 3061 + for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++) 3062 + history_uuids[n] = md->uuid[UI_HISTORY_START + n]; 3063 + for (; n < HISTORY_UUIDS; n++) 3064 + history_uuids[n] = 0; 3065 + s->history_uuids_len = HISTORY_UUIDS; 3066 + spin_unlock_irq(&md->uuid_lock); 3067 + 3068 + s->dev_disk_flags = md->flags; 3069 + q = bdev_get_queue(device->ldev->backing_bdev); 3070 + s->dev_lower_blocked = 3071 + bdi_congested(&q->backing_dev_info, 3072 + (1 << WB_async_congested) | 3073 + (1 << WB_sync_congested)); 3074 + put_ldev(device); 3075 + } 3076 + s->dev_size = drbd_get_capacity(device->this_bdev); 3077 + s->dev_read = device->read_cnt; 3078 + s->dev_write = device->writ_cnt; 3079 + s->dev_al_writes = device->al_writ_cnt; 3080 + s->dev_bm_writes = device->bm_writ_cnt; 3081 + s->dev_upper_pending = atomic_read(&device->ap_bio_cnt); 3082 + s->dev_lower_pending = atomic_read(&device->local_cnt); 3083 + 
s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags); 3084 + s->dev_exposed_data_uuid = device->ed_uuid; 3085 + } 3086 + 3087 + static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr) 3088 + { 3089 + if (cb->args[0]) { 3090 + struct drbd_resource *resource = 3091 + (struct drbd_resource *)cb->args[0]; 3092 + kref_put(&resource->kref, drbd_destroy_resource); 3093 + } 3094 + 3095 + return 0; 3096 + } 3097 + 3098 + int drbd_adm_dump_devices_done(struct netlink_callback *cb) { 3099 + return put_resource_in_arg0(cb, 7); 3100 + } 3101 + 3102 + static void device_to_info(struct device_info *, struct drbd_device *); 3103 + 3104 + int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) 3105 + { 3106 + struct nlattr *resource_filter; 3107 + struct drbd_resource *resource; 3108 + struct drbd_device *uninitialized_var(device); 3109 + int minor, err, retcode; 3110 + struct drbd_genlmsghdr *dh; 3111 + struct device_info device_info; 3112 + struct device_statistics device_statistics; 3113 + struct idr *idr_to_search; 3114 + 3115 + resource = (struct drbd_resource *)cb->args[0]; 3116 + if (!cb->args[0] && !cb->args[1]) { 3117 + resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name); 3118 + if (resource_filter) { 3119 + retcode = ERR_RES_NOT_KNOWN; 3120 + resource = drbd_find_resource(nla_data(resource_filter)); 3121 + if (!resource) 3122 + goto put_result; 3123 + cb->args[0] = (long)resource; 3124 + } 3125 + } 3126 + 3127 + rcu_read_lock(); 3128 + minor = cb->args[1]; 3129 + idr_to_search = resource ? 
&resource->devices : &drbd_devices; 3130 + device = idr_get_next(idr_to_search, &minor); 3131 + if (!device) { 3132 + err = 0; 3133 + goto out; 3134 + } 3135 + idr_for_each_entry_continue(idr_to_search, device, minor) { 3136 + retcode = NO_ERROR; 3137 + goto put_result; /* only one iteration */ 3138 + } 3139 + err = 0; 3140 + goto out; /* no more devices */ 3141 + 3142 + put_result: 3143 + dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3144 + cb->nlh->nlmsg_seq, &drbd_genl_family, 3145 + NLM_F_MULTI, DRBD_ADM_GET_DEVICES); 3146 + err = -ENOMEM; 3147 + if (!dh) 3148 + goto out; 3149 + dh->ret_code = retcode; 3150 + dh->minor = -1U; 3151 + if (retcode == NO_ERROR) { 3152 + dh->minor = device->minor; 3153 + err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device); 3154 + if (err) 3155 + goto out; 3156 + if (get_ldev(device)) { 3157 + struct disk_conf *disk_conf = 3158 + rcu_dereference(device->ldev->disk_conf); 3159 + 3160 + err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN)); 3161 + put_ldev(device); 3162 + if (err) 3163 + goto out; 3164 + } 3165 + device_to_info(&device_info, device); 3166 + err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN)); 3167 + if (err) 3168 + goto out; 3169 + 3170 + device_to_statistics(&device_statistics, device); 3171 + err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN)); 3172 + if (err) 3173 + goto out; 3174 + cb->args[1] = minor + 1; 3175 + } 3176 + genlmsg_end(skb, dh); 3177 + err = 0; 3178 + 3179 + out: 3180 + rcu_read_unlock(); 3181 + if (err) 3182 + return err; 3183 + return skb->len; 3184 + } 3185 + 3186 + int drbd_adm_dump_connections_done(struct netlink_callback *cb) 3187 + { 3188 + return put_resource_in_arg0(cb, 6); 3189 + } 3190 + 3191 + enum { SINGLE_RESOURCE, ITERATE_RESOURCES }; 3192 + 3193 + int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb) 3194 + { 3195 + struct nlattr *resource_filter; 3196 + struct drbd_resource *resource 
= NULL, *next_resource; 3197 + struct drbd_connection *uninitialized_var(connection); 3198 + int err = 0, retcode; 3199 + struct drbd_genlmsghdr *dh; 3200 + struct connection_info connection_info; 3201 + struct connection_statistics connection_statistics; 3202 + 3203 + rcu_read_lock(); 3204 + resource = (struct drbd_resource *)cb->args[0]; 3205 + if (!cb->args[0]) { 3206 + resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name); 3207 + if (resource_filter) { 3208 + retcode = ERR_RES_NOT_KNOWN; 3209 + resource = drbd_find_resource(nla_data(resource_filter)); 3210 + if (!resource) 3211 + goto put_result; 3212 + cb->args[0] = (long)resource; 3213 + cb->args[1] = SINGLE_RESOURCE; 3214 + } 3215 + } 3216 + if (!resource) { 3217 + if (list_empty(&drbd_resources)) 3218 + goto out; 3219 + resource = list_first_entry(&drbd_resources, struct drbd_resource, resources); 3220 + kref_get(&resource->kref); 3221 + cb->args[0] = (long)resource; 3222 + cb->args[1] = ITERATE_RESOURCES; 3223 + } 3224 + 3225 + next_resource: 3226 + rcu_read_unlock(); 3227 + mutex_lock(&resource->conf_update); 3228 + rcu_read_lock(); 3229 + if (cb->args[2]) { 3230 + for_each_connection_rcu(connection, resource) 3231 + if (connection == (struct drbd_connection *)cb->args[2]) 3232 + goto found_connection; 3233 + /* connection was probably deleted */ 3234 + goto no_more_connections; 3235 + } 3236 + connection = list_entry(&resource->connections, struct drbd_connection, connections); 3237 + 3238 + found_connection: 3239 + list_for_each_entry_continue_rcu(connection, &resource->connections, connections) { 3240 + if (!has_net_conf(connection)) 3241 + continue; 3242 + retcode = NO_ERROR; 3243 + goto put_result; /* only one iteration */ 3244 + } 3245 + 3246 + no_more_connections: 3247 + if (cb->args[1] == ITERATE_RESOURCES) { 3248 + for_each_resource_rcu(next_resource, &drbd_resources) { 3249 + if (next_resource == resource) 3250 + goto found_resource; 3251 + } 3252 + /* resource was probably 
deleted */ 3253 + } 3254 + goto out; 3255 + 3256 + found_resource: 3257 + list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) { 3258 + mutex_unlock(&resource->conf_update); 3259 + kref_put(&resource->kref, drbd_destroy_resource); 3260 + resource = next_resource; 3261 + kref_get(&resource->kref); 3262 + cb->args[0] = (long)resource; 3263 + cb->args[2] = 0; 3264 + goto next_resource; 3265 + } 3266 + goto out; /* no more resources */ 3267 + 3268 + put_result: 3269 + dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3270 + cb->nlh->nlmsg_seq, &drbd_genl_family, 3271 + NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS); 3272 + err = -ENOMEM; 3273 + if (!dh) 3274 + goto out; 3275 + dh->ret_code = retcode; 3276 + dh->minor = -1U; 3277 + if (retcode == NO_ERROR) { 3278 + struct net_conf *net_conf; 3279 + 3280 + err = nla_put_drbd_cfg_context(skb, resource, connection, NULL); 3281 + if (err) 3282 + goto out; 3283 + net_conf = rcu_dereference(connection->net_conf); 3284 + if (net_conf) { 3285 + err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN)); 3286 + if (err) 3287 + goto out; 3288 + } 3289 + connection_to_info(&connection_info, connection); 3290 + err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN)); 3291 + if (err) 3292 + goto out; 3293 + connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags); 3294 + err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN)); 3295 + if (err) 3296 + goto out; 3297 + cb->args[2] = (long)connection; 3298 + } 3299 + genlmsg_end(skb, dh); 3300 + err = 0; 3301 + 3302 + out: 3303 + rcu_read_unlock(); 3304 + if (resource) 3305 + mutex_unlock(&resource->conf_update); 3306 + if (err) 3307 + return err; 3308 + return skb->len; 3309 + } 3310 + 3311 + enum mdf_peer_flag { 3312 + MDF_PEER_CONNECTED = 1 << 0, 3313 + MDF_PEER_OUTDATED = 1 << 1, 3314 + MDF_PEER_FENCING = 1 << 2, 3315 + MDF_PEER_FULL_SYNC = 1 << 3, 3316 + }; 3317 + 3318 + static void 
peer_device_to_statistics(struct peer_device_statistics *s, 3319 + struct drbd_peer_device *peer_device) 3320 + { 3321 + struct drbd_device *device = peer_device->device; 3322 + 3323 + memset(s, 0, sizeof(*s)); 3324 + s->peer_dev_received = device->recv_cnt; 3325 + s->peer_dev_sent = device->send_cnt; 3326 + s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) + 3327 + atomic_read(&device->rs_pending_cnt); 3328 + s->peer_dev_unacked = atomic_read(&device->unacked_cnt); 3329 + s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9); 3330 + s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9); 3331 + if (get_ldev(device)) { 3332 + struct drbd_md *md = &device->ldev->md; 3333 + 3334 + spin_lock_irq(&md->uuid_lock); 3335 + s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP]; 3336 + spin_unlock_irq(&md->uuid_lock); 3337 + s->peer_dev_flags = 3338 + (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ? 3339 + MDF_PEER_CONNECTED : 0) + 3340 + (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) && 3341 + !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ? 3342 + MDF_PEER_OUTDATED : 0) + 3343 + /* FIXME: MDF_PEER_FENCING? */ 3344 + (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ? 
3345 + MDF_PEER_FULL_SYNC : 0); 3346 + put_ldev(device); 3347 + } 3348 + } 3349 + 3350 + int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb) 3351 + { 3352 + return put_resource_in_arg0(cb, 9); 3353 + } 3354 + 3355 + int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb) 3356 + { 3357 + struct nlattr *resource_filter; 3358 + struct drbd_resource *resource; 3359 + struct drbd_device *uninitialized_var(device); 3360 + struct drbd_peer_device *peer_device = NULL; 3361 + int minor, err, retcode; 3362 + struct drbd_genlmsghdr *dh; 3363 + struct idr *idr_to_search; 3364 + 3365 + resource = (struct drbd_resource *)cb->args[0]; 3366 + if (!cb->args[0] && !cb->args[1]) { 3367 + resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name); 3368 + if (resource_filter) { 3369 + retcode = ERR_RES_NOT_KNOWN; 3370 + resource = drbd_find_resource(nla_data(resource_filter)); 3371 + if (!resource) 3372 + goto put_result; 3373 + } 3374 + cb->args[0] = (long)resource; 3375 + } 3376 + 3377 + rcu_read_lock(); 3378 + minor = cb->args[1]; 3379 + idr_to_search = resource ? &resource->devices : &drbd_devices; 3380 + device = idr_find(idr_to_search, minor); 3381 + if (!device) { 3382 + next_device: 3383 + minor++; 3384 + cb->args[2] = 0; 3385 + device = idr_get_next(idr_to_search, &minor); 3386 + if (!device) { 3387 + err = 0; 3388 + goto out; 3389 + } 3390 + } 3391 + if (cb->args[2]) { 3392 + for_each_peer_device(peer_device, device) 3393 + if (peer_device == (struct drbd_peer_device *)cb->args[2]) 3394 + goto found_peer_device; 3395 + /* peer device was probably deleted */ 3396 + goto next_device; 3397 + } 3398 + /* Make peer_device point to the list head (not the first entry). 
*/ 3399 + peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices); 3400 + 3401 + found_peer_device: 3402 + list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) { 3403 + if (!has_net_conf(peer_device->connection)) 3404 + continue; 3405 + retcode = NO_ERROR; 3406 + goto put_result; /* only one iteration */ 3407 + } 3408 + goto next_device; 3409 + 3410 + put_result: 3411 + dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3412 + cb->nlh->nlmsg_seq, &drbd_genl_family, 3413 + NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES); 3414 + err = -ENOMEM; 3415 + if (!dh) 3416 + goto out; 3417 + dh->ret_code = retcode; 3418 + dh->minor = -1U; 3419 + if (retcode == NO_ERROR) { 3420 + struct peer_device_info peer_device_info; 3421 + struct peer_device_statistics peer_device_statistics; 3422 + 3423 + dh->minor = minor; 3424 + err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device); 3425 + if (err) 3426 + goto out; 3427 + peer_device_to_info(&peer_device_info, peer_device); 3428 + err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN)); 3429 + if (err) 3430 + goto out; 3431 + peer_device_to_statistics(&peer_device_statistics, peer_device); 3432 + err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN)); 3433 + if (err) 3434 + goto out; 3435 + cb->args[1] = minor; 3436 + cb->args[2] = (long)peer_device; 3437 + } 3438 + genlmsg_end(skb, dh); 3439 + err = 0; 3440 + 3441 + out: 3442 + rcu_read_unlock(); 3443 + if (err) 3444 + return err; 3445 + return skb->len; 3446 + } 3447 + /* 2975 3448 * Return the connection of @resource if @resource has exactly one connection. 2976 3449 */ 2977 3450 static struct drbd_connection *the_only_connection(struct drbd_resource *resource) ··· 4303 3816 drbd_err(device, "Error %d while broadcasting event. 
" 4304 3817 "Event seq:%u sib_reason:%u\n", 4305 3818 err, seq, sib->sib_reason); 4306 - } 4307 - 4308 - static void device_to_statistics(struct device_statistics *s, 4309 - struct drbd_device *device) 4310 - { 4311 - memset(s, 0, sizeof(*s)); 4312 - s->dev_upper_blocked = !may_inc_ap_bio(device); 4313 - if (get_ldev(device)) { 4314 - struct drbd_md *md = &device->ldev->md; 4315 - u64 *history_uuids = (u64 *)s->history_uuids; 4316 - struct request_queue *q; 4317 - int n; 4318 - 4319 - spin_lock_irq(&md->uuid_lock); 4320 - s->dev_current_uuid = md->uuid[UI_CURRENT]; 4321 - BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1); 4322 - for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++) 4323 - history_uuids[n] = md->uuid[UI_HISTORY_START + n]; 4324 - for (; n < HISTORY_UUIDS; n++) 4325 - history_uuids[n] = 0; 4326 - s->history_uuids_len = HISTORY_UUIDS; 4327 - spin_unlock_irq(&md->uuid_lock); 4328 - 4329 - s->dev_disk_flags = md->flags; 4330 - q = bdev_get_queue(device->ldev->backing_bdev); 4331 - s->dev_lower_blocked = 4332 - bdi_congested(&q->backing_dev_info, 4333 - (1 << WB_async_congested) | 4334 - (1 << WB_sync_congested)); 4335 - put_ldev(device); 4336 - } 4337 - s->dev_size = drbd_get_capacity(device->this_bdev); 4338 - s->dev_read = device->read_cnt; 4339 - s->dev_write = device->writ_cnt; 4340 - s->dev_al_writes = device->al_writ_cnt; 4341 - s->dev_bm_writes = device->bm_writ_cnt; 4342 - s->dev_upper_pending = atomic_read(&device->ap_bio_cnt); 4343 - s->dev_lower_pending = atomic_read(&device->local_cnt); 4344 - s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags); 4345 - s->dev_exposed_data_uuid = device->ed_uuid; 4346 - } 4347 - 4348 - enum mdf_peer_flag { 4349 - MDF_PEER_CONNECTED = 1 << 0, 4350 - MDF_PEER_OUTDATED = 1 << 1, 4351 - MDF_PEER_FENCING = 1 << 2, 4352 - MDF_PEER_FULL_SYNC = 1 << 3, 4353 - }; 4354 - 4355 - static void peer_device_to_statistics(struct peer_device_statistics *s, 4356 - struct 
drbd_peer_device *peer_device) 4357 - { 4358 - struct drbd_device *device = peer_device->device; 4359 - 4360 - memset(s, 0, sizeof(*s)); 4361 - s->peer_dev_received = device->recv_cnt; 4362 - s->peer_dev_sent = device->send_cnt; 4363 - s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) + 4364 - atomic_read(&device->rs_pending_cnt); 4365 - s->peer_dev_unacked = atomic_read(&device->unacked_cnt); 4366 - s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9); 4367 - s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9); 4368 - if (get_ldev(device)) { 4369 - struct drbd_md *md = &device->ldev->md; 4370 - 4371 - spin_lock_irq(&md->uuid_lock); 4372 - s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP]; 4373 - spin_unlock_irq(&md->uuid_lock); 4374 - s->peer_dev_flags = 4375 - (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ? 4376 - MDF_PEER_CONNECTED : 0) + 4377 - (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) && 4378 - !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ? 4379 - MDF_PEER_OUTDATED : 0) + 4380 - /* FIXME: MDF_PEER_FENCING? */ 4381 - (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ? 4382 - MDF_PEER_FULL_SYNC : 0); 4383 - put_ldev(device); 4384 - } 4385 3819 } 4386 3820 4387 3821 static int nla_put_notification_header(struct sk_buff *msg,
+35
include/linux/drbd_genl.h
··· 453 453 GENL_op(DRBD_ADM_DOWN, 27, GENL_doit(drbd_adm_down), 454 454 GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) 455 455 456 + GENL_op(DRBD_ADM_GET_RESOURCES, 30, 457 + GENL_op_init( 458 + .dumpit = drbd_adm_dump_resources, 459 + ), 460 + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) 461 + GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, DRBD_GENLA_F_MANDATORY) 462 + GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, DRBD_GENLA_F_MANDATORY)) 463 + 464 + GENL_op(DRBD_ADM_GET_DEVICES, 31, 465 + GENL_op_init( 466 + .dumpit = drbd_adm_dump_devices, 467 + .done = drbd_adm_dump_devices_done, 468 + ), 469 + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) 470 + GENL_tla_expected(DRBD_NLA_DEVICE_INFO, DRBD_GENLA_F_MANDATORY) 471 + GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY)) 472 + 473 + GENL_op(DRBD_ADM_GET_CONNECTIONS, 32, 474 + GENL_op_init( 475 + .dumpit = drbd_adm_dump_connections, 476 + .done = drbd_adm_dump_connections_done, 477 + ), 478 + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) 479 + GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, DRBD_GENLA_F_MANDATORY) 480 + GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, DRBD_GENLA_F_MANDATORY)) 481 + 482 + GENL_op(DRBD_ADM_GET_PEER_DEVICES, 33, 483 + GENL_op_init( 484 + .dumpit = drbd_adm_dump_peer_devices, 485 + .done = drbd_adm_dump_peer_devices_done, 486 + ), 487 + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) 488 + GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, DRBD_GENLA_F_MANDATORY) 489 + GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY)) 490 + 456 491 GENL_notification( 457 492 DRBD_RESOURCE_STATE, 34, events, 458 493 GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+14
include/linux/idr.h
··· 135 135 #define idr_for_each_entry(idp, entry, id) \ 136 136 for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) 137 137 138 + /** 139 + * idr_for_each_entry - continue iteration over an idr's elements of a given type 140 + * @idp: idr handle 141 + * @entry: the type * to use as cursor 142 + * @id: id entry's key 143 + * 144 + * Continue to iterate over list of given type, continuing after 145 + * the current position. 146 + */ 147 + #define idr_for_each_entry_continue(idp, entry, id) \ 148 + for ((entry) = idr_get_next((idp), &(id)); \ 149 + entry; \ 150 + ++id, (entry) = idr_get_next((idp), &(id))) 151 + 138 152 /* 139 153 * IDA - IDR based id allocator, use when translation from id to 140 154 * pointer isn't necessary.