Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: Insert blank lines after declarations.

Resolve checkpatch issues for missing blank lines after declarations.
Issues found in multiple files with checkpatch.pl.

Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20200702131749.GA25710@blackclown

authored by

Suraj Upadhyay and committed by
Daniel Vetter
948de842 4cca2e64

+74
+1
drivers/gpu/drm/drm_atomic.c
··· 575 575 fb->modifier); 576 576 if (ret) { 577 577 struct drm_format_name_buf format_name; 578 + 578 579 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n", 579 580 plane->base.id, plane->name, 580 581 drm_get_format_name(fb->format->format,
+7
drivers/gpu/drm/drm_atomic_uapi.c
··· 522 522 523 523 if (property == config->prop_fb_id) { 524 524 struct drm_framebuffer *fb; 525 + 525 526 fb = drm_framebuffer_lookup(dev, file_priv, val); 526 527 drm_atomic_set_fb_for_plane(state, fb); 527 528 if (fb) ··· 540 539 541 540 } else if (property == config->prop_crtc_id) { 542 541 struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val); 542 + 543 543 if (val && !crtc) 544 544 return -EACCES; 545 545 return drm_atomic_set_crtc_for_plane(state, crtc); ··· 683 681 684 682 if (property == config->prop_crtc_id) { 685 683 struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val); 684 + 686 685 if (val && !crtc) 687 686 return -EACCES; 688 687 return drm_atomic_set_crtc_for_connector(state, crtc); ··· 757 754 } else if (property == config->writeback_fb_id_property) { 758 755 struct drm_framebuffer *fb; 759 756 int ret; 757 + 760 758 fb = drm_framebuffer_lookup(dev, file_priv, val); 761 759 ret = drm_atomic_set_writeback_fb_for_connector(state, fb); 762 760 if (fb) ··· 865 861 switch (obj->type) { 866 862 case DRM_MODE_OBJECT_CONNECTOR: { 867 863 struct drm_connector *connector = obj_to_connector(obj); 864 + 868 865 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 869 866 ret = drm_atomic_connector_get_property(connector, 870 867 connector->state, property, val); ··· 873 868 } 874 869 case DRM_MODE_OBJECT_CRTC: { 875 870 struct drm_crtc *crtc = obj_to_crtc(obj); 871 + 876 872 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 877 873 ret = drm_atomic_crtc_get_property(crtc, 878 874 crtc->state, property, val); ··· 881 875 } 882 876 case DRM_MODE_OBJECT_PLANE: { 883 877 struct drm_plane *plane = obj_to_plane(obj); 878 + 884 879 WARN_ON(!drm_modeset_is_locked(&plane->mutex)); 885 880 ret = drm_atomic_plane_get_property(plane, 886 881 plane->state, property, val);
+6
drivers/gpu/drm/drm_bufs.c
··· 53 53 struct drm_local_map *map) 54 54 { 55 55 struct drm_map_list *entry; 56 + 56 57 list_for_each_entry(entry, &dev->maplist, head) { 57 58 /* 58 59 * Because the kernel-userspace ABI is fixed at a 32-bit offset ··· 103 102 104 103 if (!use_hashed_handle) { 105 104 int ret; 105 + 106 106 hash->key = user_token >> PAGE_SHIFT; 107 107 ret = drm_ht_insert_item(&dev->map_hash, hash); 108 108 if (ret != -EINVAL) ··· 393 391 unsigned int token) 394 392 { 395 393 struct drm_map_list *_entry; 394 + 396 395 list_for_each_entry(_entry, &dev->maplist, head) 397 396 if (_entry->user_token == token) 398 397 return _entry->map; ··· 1326 1323 if (*p >= count) { 1327 1324 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1328 1325 struct drm_buf_entry *from = &dma->bufs[i]; 1326 + 1329 1327 if (from->buf_count) { 1330 1328 if (f(data, count, from) < 0) 1331 1329 return -EFAULT; ··· 1363 1359 struct drm_file *file_priv) 1364 1360 { 1365 1361 struct drm_buf_info *request = data; 1362 + 1366 1363 return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf); 1367 1364 } 1368 1365 ··· 1575 1570 struct drm_file *file_priv) 1576 1571 { 1577 1572 struct drm_buf_map *request = data; 1573 + 1578 1574 return __drm_legacy_mapbufs(dev, data, &request->count, 1579 1575 &request->virtual, map_one_buf, 1580 1576 file_priv);
+2
drivers/gpu/drm/drm_connector.c
··· 2409 2409 { 2410 2410 struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount); 2411 2411 struct drm_device *dev = tg->dev; 2412 + 2412 2413 mutex_lock(&dev->mode_config.idr_mutex); 2413 2414 idr_remove(&dev->mode_config.tile_idr, tg->id); 2414 2415 mutex_unlock(&dev->mode_config.idr_mutex); ··· 2445 2444 { 2446 2445 struct drm_tile_group *tg; 2447 2446 int id; 2447 + 2448 2448 mutex_lock(&dev->mode_config.idr_mutex); 2449 2449 idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) { 2450 2450 if (!memcmp(tg->group_data, topology, 8)) {
+1
drivers/gpu/drm/drm_crtc.c
··· 656 656 fb->modifier); 657 657 if (ret) { 658 658 struct drm_format_name_buf format_name; 659 + 659 660 DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n", 660 661 drm_get_format_name(fb->format->format, 661 662 &format_name),
+3
drivers/gpu/drm/drm_crtc_helper.c
··· 185 185 186 186 drm_for_each_crtc(crtc, dev) { 187 187 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 188 + 188 189 crtc->enabled = drm_helper_crtc_in_use(crtc); 189 190 if (!crtc->enabled) { 190 191 if (crtc_funcs->disable) ··· 885 884 if (mode < old_dpms) { 886 885 if (crtc) { 887 886 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 887 + 888 888 if (crtc_funcs->dpms) 889 889 (*crtc_funcs->dpms) (crtc, 890 890 drm_helper_choose_crtc_dpms(crtc)); ··· 900 898 drm_helper_encoder_dpms(encoder, encoder_dpms); 901 899 if (crtc) { 902 900 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 901 + 903 902 if (crtc_funcs->dpms) 904 903 (*crtc_funcs->dpms) (crtc, 905 904 drm_helper_choose_crtc_dpms(crtc));
+1
drivers/gpu/drm/drm_dp_helper.c
··· 57 57 int i = DP_LANE0_1_STATUS + (lane >> 1); 58 58 int s = (lane & 1) * 4; 59 59 u8 l = dp_link_status(link_status, i); 60 + 60 61 return (l >> s) & 0xf; 61 62 } 62 63
+20
drivers/gpu/drm/drm_dp_mst_topology.c
··· 259 259 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) 260 260 { 261 261 u8 size = 3; 262 + 262 263 size += (hdr->lct / 2); 263 264 return size; 264 265 } ··· 270 269 int idx = 0; 271 270 int i; 272 271 u8 crc4; 272 + 273 273 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); 274 274 for (i = 0; i < (hdr->lct / 2); i++) 275 275 buf[idx++] = hdr->rad[i]; ··· 291 289 u8 len; 292 290 int i; 293 291 u8 idx; 292 + 294 293 if (buf[0] == 0) 295 294 return false; 296 295 len = 3; ··· 329 326 int idx = 0; 330 327 int i; 331 328 u8 *buf = raw->msg; 329 + 332 330 buf[idx++] = req->req_type & 0x7f; 333 331 334 332 switch (req->req_type) { ··· 677 673 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) 678 674 { 679 675 u8 crc4; 676 + 680 677 crc4 = drm_dp_msg_data_crc4(msg, len); 681 678 msg[len] = crc4; 682 679 } ··· 752 747 { 753 748 int idx = 1; 754 749 int i; 750 + 755 751 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); 756 752 idx += 16; 757 753 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; ··· 804 798 struct drm_dp_sideband_msg_reply_body *repmsg) 805 799 { 806 800 int idx = 1; 801 + 807 802 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; 808 803 idx++; 809 804 if (idx > raw->curlen) ··· 825 818 struct drm_dp_sideband_msg_reply_body *repmsg) 826 819 { 827 820 int idx = 1; 821 + 828 822 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; 829 823 idx++; 830 824 if (idx > raw->curlen) ··· 859 851 struct drm_dp_sideband_msg_reply_body *repmsg) 860 852 { 861 853 int idx = 1; 854 + 862 855 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; 863 856 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; 864 857 idx++; ··· 883 874 struct drm_dp_sideband_msg_reply_body *repmsg) 884 875 { 885 876 int idx = 1; 877 + 886 878 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 887 879 idx++; 888 880 if (idx > raw->curlen) ··· 906 896 struct 
drm_dp_sideband_msg_reply_body *repmsg) 907 897 { 908 898 int idx = 1; 899 + 909 900 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 910 901 idx++; 911 902 if (idx > raw->curlen) ··· 1093 1082 u8 *sdp_stream_sink) 1094 1083 { 1095 1084 struct drm_dp_sideband_msg_req_body req; 1085 + 1096 1086 memset(&req, 0, sizeof(req)); 1097 1087 req.req_type = DP_ALLOCATE_PAYLOAD; 1098 1088 req.u.allocate_payload.port_number = port_num; ··· 1154 1142 int vcpi) 1155 1143 { 1156 1144 int i; 1145 + 1157 1146 if (vcpi == 0) 1158 1147 return; 1159 1148 ··· 1953 1940 int parent_lct = port->parent->lct; 1954 1941 int shift = 4; 1955 1942 int idx = (parent_lct - 1) / 2; 1943 + 1956 1944 if (parent_lct > 1) { 1957 1945 memcpy(rad, port->parent->rad, idx + 1); 1958 1946 shift = (parent_lct % 2) ? 4 : 0; ··· 2132 2118 { 2133 2119 int i; 2134 2120 char temp[8]; 2121 + 2135 2122 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 2136 2123 for (i = 0; i < (mstb->lct - 1); i++) { 2137 2124 int shift = (i % 2) ? 0 : 4; 2138 2125 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; 2126 + 2139 2127 snprintf(temp, sizeof(temp), "-%d", port_num); 2140 2128 strlcat(proppath, temp, proppath_size); 2141 2129 } ··· 3174 3158 struct drm_dp_payload *payload) 3175 3159 { 3176 3160 int ret; 3161 + 3177 3162 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); 3178 3163 if (ret < 0) 3179 3164 return ret; ··· 3331 3314 struct drm_dp_mst_port *port; 3332 3315 int i; 3333 3316 int ret = 0; 3317 + 3334 3318 mutex_lock(&mgr->payload_lock); 3335 3319 for (i = 0; i < mgr->max_payloads; i++) { 3336 3320 ··· 3797 3779 /* Were we actually expecting a response, and from this mstb? 
*/ 3798 3780 if (!txmsg || txmsg->dst != mstb) { 3799 3781 struct drm_dp_sideband_msg_hdr *hdr; 3782 + 3800 3783 hdr = &msg->initial_hdr; 3801 3784 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", 3802 3785 mstb, hdr->seqno, hdr->lct, hdr->rad[0], ··· 4345 4326 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 4346 4327 { 4347 4328 int slots = 0; 4329 + 4348 4330 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4349 4331 if (!port) 4350 4332 return slots;
+17
drivers/gpu/drm/drm_edid.c
··· 1672 1672 1673 1673 if (block == 0) { 1674 1674 int score = drm_edid_header_is_valid(raw_edid); 1675 + 1675 1676 if (score == 8) { 1676 1677 if (edid_corrupt) 1677 1678 *edid_corrupt = false; ··· 2224 2223 2225 2224 for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) { 2226 2225 const struct drm_display_mode *ptr = &drm_dmt_modes[i]; 2226 + 2227 2227 if (hsize != ptr->hdisplay) 2228 2228 continue; 2229 2229 if (vsize != ptr->vdisplay) ··· 2296 2294 2297 2295 for (i = 1; i <= raw_edid[0x7e]; i++) { 2298 2296 u8 *ext = raw_edid + (i * EDID_LENGTH); 2297 + 2299 2298 switch (*ext) { 2300 2299 case CEA_EXT: 2301 2300 cea_for_each_detailed_block(ext, cb, closure); ··· 2328 2325 { 2329 2326 if (edid->revision >= 4) { 2330 2327 bool ret = false; 2328 + 2331 2329 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); 2332 2330 return ret; 2333 2331 } ··· 2353 2349 drm_gtf2_hbreak(struct edid *edid) 2354 2350 { 2355 2351 u8 *r = NULL; 2352 + 2356 2353 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); 2357 2354 return r ? (r[12] * 2) : 0; 2358 2355 } ··· 2362 2357 drm_gtf2_2c(struct edid *edid) 2363 2358 { 2364 2359 u8 *r = NULL; 2360 + 2365 2361 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); 2366 2362 return r ? r[13] : 0; 2367 2363 } ··· 2371 2365 drm_gtf2_m(struct edid *edid) 2372 2366 { 2373 2367 u8 *r = NULL; 2368 + 2374 2369 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); 2375 2370 return r ? (r[15] << 8) + r[14] : 0; 2376 2371 } ··· 2380 2373 drm_gtf2_k(struct edid *edid) 2381 2374 { 2382 2375 u8 *r = NULL; 2376 + 2383 2377 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); 2384 2378 return r ? r[16] : 0; 2385 2379 } ··· 2389 2381 drm_gtf2_2j(struct edid *edid) 2390 2382 { 2391 2383 u8 *r = NULL; 2384 + 2392 2385 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); 2393 2386 return r ? 
r[17] : 0; 2394 2387 } ··· 2841 2832 2842 2833 for (i = 0; i < ARRAY_SIZE(extra_modes); i++) { 2843 2834 const struct minimode *m = &extra_modes[i]; 2835 + 2844 2836 newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); 2845 2837 if (!newmode) 2846 2838 return modes; ··· 2871 2861 2872 2862 for (i = 0; i < ARRAY_SIZE(extra_modes); i++) { 2873 2863 const struct minimode *m = &extra_modes[i]; 2864 + 2874 2865 newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); 2875 2866 if (!newmode) 2876 2867 return modes; ··· 3007 2996 for (i = 0; i <= EDID_EST_TIMINGS; i++) { 3008 2997 if (est_bits & (1<<i)) { 3009 2998 struct drm_display_mode *newmode; 2999 + 3010 3000 newmode = drm_mode_duplicate(dev, &edid_est_modes[i]); 3011 3001 if (newmode) { 3012 3002 drm_mode_probed_add(connector, newmode); ··· 3096 3084 3097 3085 for (i = 0; i < 4; i++) { 3098 3086 int uninitialized_var(width), height; 3087 + 3099 3088 cvt = &(timing->data.other_data.data.cvt[i]); 3100 3089 3101 3090 if (!memcmp(cvt->code, empty, 3)) ··· 3739 3726 3740 3727 for (i = 0; i < len; i++) { 3741 3728 struct drm_display_mode *mode; 3729 + 3742 3730 mode = drm_display_mode_from_vic_index(connector, db, len, i); 3743 3731 if (mode) { 3744 3732 /* ··· 4581 4567 4582 4568 if (cea_db_tag(db) == AUDIO_BLOCK) { 4583 4569 int j; 4570 + 4584 4571 dbl = cea_db_payload_len(db); 4585 4572 4586 4573 count = dbl / 3; /* SAD is 3B */ ··· 5185 5170 unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1; 5186 5171 bool hsync_positive = (timings->hsync[1] >> 7) & 0x1; 5187 5172 bool vsync_positive = (timings->vsync[1] >> 7) & 0x1; 5173 + 5188 5174 mode = drm_mode_create(dev); 5189 5175 if (!mode) 5190 5176 return NULL; ··· 5367 5351 5368 5352 for (i = 0; i < count; i++) { 5369 5353 const struct drm_display_mode *ptr = &drm_dmt_modes[i]; 5354 + 5370 5355 if (hdisplay && vdisplay) { 5371 5356 /* 5372 5357 * Only when two are valid, they will be used to check
+2
drivers/gpu/drm/drm_file.c
··· 375 375 */ 376 376 if (!dev->hose) { 377 377 struct pci_dev *pci_dev; 378 + 378 379 pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); 379 380 if (pci_dev) { 380 381 dev->hose = pci_dev->sysdata; ··· 759 758 struct drm_pending_event *p) 760 759 { 761 760 unsigned long flags; 761 + 762 762 spin_lock_irqsave(&dev->event_lock, flags); 763 763 if (p->file_priv) { 764 764 p->file_priv->event_space += p->event->length;
+1
drivers/gpu/drm/drm_framebuffer.c
··· 1110 1110 if (drm_framebuffer_read_refcount(fb) > 1) { 1111 1111 if (drm_drv_uses_atomic_modeset(dev)) { 1112 1112 int ret = atomic_remove_fb(fb); 1113 + 1113 1114 WARN(ret, "atomic remove_fb failed with %i\n", ret); 1114 1115 } else 1115 1116 legacy_remove_fb(fb);
+2
drivers/gpu/drm/drm_ioc32.c
··· 388 388 struct drm_file *file_priv) 389 389 { 390 390 drm_buf_info32_t *request = data; 391 + 391 392 return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32); 392 393 } 393 394 ··· 814 813 unsigned long arg) 815 814 { 816 815 drm_update_draw32_t update32; 816 + 817 817 if (copy_from_user(&update32, (void __user *)arg, sizeof(update32))) 818 818 return -EFAULT; 819 819
+4
drivers/gpu/drm/drm_lease.c
··· 166 166 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 167 167 if (_drm_lease_held_master(master, crtc->base.id)) { 168 168 uint32_t mask_in = 1ul << count_in; 169 + 169 170 if ((crtcs_in & mask_in) != 0) { 170 171 uint32_t mask_out = 1ul << count_out; 172 + 171 173 crtcs_out |= mask_out; 172 174 } 173 175 count_out++; ··· 425 423 for (o = 0; o < object_count; o++) { 426 424 struct drm_mode_object *obj = objects[o]; 427 425 u32 object_id = objects[o]->id; 426 + 428 427 DRM_DEBUG_LEASE("Adding object %d to lease\n", object_id); 429 428 430 429 /* ··· 444 441 } 445 442 if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) { 446 443 struct drm_crtc *crtc = obj_to_crtc(obj); 444 + 447 445 ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL); 448 446 if (ret < 0) { 449 447 DRM_DEBUG_LEASE("Object primary plane %d cannot be inserted into leases (%d)\n",
+1
drivers/gpu/drm/drm_lock.c
··· 330 330 struct drm_file *file_priv) 331 331 { 332 332 struct drm_master *master = file_priv->master; 333 + 333 334 return (file_priv->lock_count && master->lock.hw_lock && 334 335 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && 335 336 master->lock.file_priv == file_priv);
+1
drivers/gpu/drm/drm_mode_config.c
··· 538 538 WARN_ON(!list_empty(&dev->mode_config.fb_list)); 539 539 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { 540 540 struct drm_printer p = drm_debug_printer("[leaked fb]"); 541 + 541 542 drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); 542 543 drm_framebuffer_print_info(&p, 1, fb); 543 544 drm_framebuffer_free(&fb->base.refcount);
+1
drivers/gpu/drm/drm_pci.c
··· 298 298 void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) 299 299 { 300 300 struct drm_device *dev, *tmp; 301 + 301 302 DRM_DEBUG("\n"); 302 303 303 304 if (!(driver->driver_features & DRIVER_LEGACY)) {
+1
drivers/gpu/drm/drm_plane.c
··· 216 216 217 217 if (format_modifiers) { 218 218 const uint64_t *temp_modifiers = format_modifiers; 219 + 219 220 while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID) 220 221 format_modifier_count++; 221 222 }
+1
drivers/gpu/drm/drm_prime.c
··· 1014 1014 { 1015 1015 struct dma_buf_attachment *attach; 1016 1016 struct dma_buf *dma_buf; 1017 + 1017 1018 attach = obj->import_attach; 1018 1019 if (sg) 1019 1020 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+1
drivers/gpu/drm/drm_syncobj.c
··· 1188 1188 uint32_t count) 1189 1189 { 1190 1190 uint32_t i; 1191 + 1191 1192 for (i = 0; i < count; i++) 1192 1193 drm_syncobj_put(syncobjs[i]); 1193 1194 kfree(syncobjs);
+1
drivers/gpu/drm/drm_vblank.c
··· 1623 1623 e->event.vbl.crtc_id = 0; 1624 1624 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1625 1625 struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); 1626 + 1626 1627 if (crtc) 1627 1628 e->event.vbl.crtc_id = crtc->base.id; 1628 1629 }