Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/core: Enforce security on management datagrams

Allocate and free a security context when creating and destroying a MAD
agent. This context is used for controlling access to PKeys and sending
and receiving SMPs.

When sending or receiving a MAD check that the agent has permission to
access the PKey for the Subnet Prefix of the port.

During MAD and snoop agent registration for SMI QPs check that the
calling process has permission to manage the subnet and
register a callback with the LSM to be notified of policy changes. When
notification of a policy change occurs, recheck permission and set a flag
indicating sending and receiving SMPs is allowed.

When sending and receiving MADs check that the agent has access to the
SMI if it's on an SMI QP. Because security policy can change it's
possible permission was allowed when creating the agent, but no longer
is.

Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Acked-by: Doug Ledford <dledford@redhat.com>
[PM: remove the LSM hook init code]
Signed-off-by: Paul Moore <paul@paul-moore.com>

authored by

Daniel Jurgens and committed by
Paul Moore
47a2b338 8f408ab6

+195 -8
+35
drivers/infiniband/core/core_priv.h
··· 38 38 #include <linux/cgroup_rdma.h> 39 39 40 40 #include <rdma/ib_verbs.h> 41 + #include <rdma/ib_mad.h> 42 + #include "mad_priv.h" 41 43 42 44 struct pkey_index_qp_list { 43 45 struct list_head pkey_index_list; ··· 191 189 u64 *sn_pfx); 192 190 193 191 #ifdef CONFIG_SECURITY_INFINIBAND 192 + int ib_security_pkey_access(struct ib_device *dev, 193 + u8 port_num, 194 + u16 pkey_index, 195 + void *sec); 196 + 194 197 void ib_security_destroy_port_pkey_list(struct ib_device *device); 195 198 196 199 void ib_security_cache_change(struct ib_device *device, ··· 213 206 void ib_destroy_qp_security_end(struct ib_qp_security *sec); 214 207 int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev); 215 208 void ib_close_shared_qp_security(struct ib_qp_security *sec); 209 + int ib_mad_agent_security_setup(struct ib_mad_agent *agent, 210 + enum ib_qp_type qp_type); 211 + void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent); 212 + int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index); 216 213 #else 214 + static inline int ib_security_pkey_access(struct ib_device *dev, 215 + u8 port_num, 216 + u16 pkey_index, 217 + void *sec) 218 + { 219 + return 0; 220 + } 221 + 217 222 static inline void ib_security_destroy_port_pkey_list(struct ib_device *device) 218 223 { 219 224 } ··· 273 254 274 255 static inline void ib_close_shared_qp_security(struct ib_qp_security *sec) 275 256 { 257 + } 258 + 259 + static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent, 260 + enum ib_qp_type qp_type) 261 + { 262 + return 0; 263 + } 264 + 265 + static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) 266 + { 267 + } 268 + 269 + static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, 270 + u16 pkey_index) 271 + { 272 + return 0; 276 273 } 277 274 #endif 278 275 #endif /* _CORE_PRIV_H */
+44 -8
drivers/infiniband/core/mad.c
··· 40 40 #include <linux/dma-mapping.h> 41 41 #include <linux/slab.h> 42 42 #include <linux/module.h> 43 + #include <linux/security.h> 43 44 #include <rdma/ib_cache.h> 44 45 45 46 #include "mad_priv.h" 47 + #include "core_priv.h" 46 48 #include "mad_rmpp.h" 47 49 #include "smi.h" 48 50 #include "opa_smi.h" ··· 371 369 atomic_set(&mad_agent_priv->refcount, 1); 372 370 init_completion(&mad_agent_priv->comp); 373 371 372 + ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); 373 + if (ret2) { 374 + ret = ERR_PTR(ret2); 375 + goto error4; 376 + } 377 + 374 378 spin_lock_irqsave(&port_priv->reg_lock, flags); 375 379 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; 376 380 ··· 394 386 if (method) { 395 387 if (method_in_use(&method, 396 388 mad_reg_req)) 397 - goto error4; 389 + goto error5; 398 390 } 399 391 } 400 392 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, ··· 410 402 if (is_vendor_method_in_use( 411 403 vendor_class, 412 404 mad_reg_req)) 413 - goto error4; 405 + goto error5; 414 406 } 415 407 } 416 408 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); 417 409 } 418 410 if (ret2) { 419 411 ret = ERR_PTR(ret2); 420 - goto error4; 412 + goto error5; 421 413 } 422 414 } 423 415 ··· 426 418 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 427 419 428 420 return &mad_agent_priv->agent; 429 - 430 - error4: 421 + error5: 431 422 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 423 + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); 424 + error4: 432 425 kfree(reg_req); 433 426 error3: 434 427 kfree(mad_agent_priv); ··· 500 491 struct ib_mad_agent *ret; 501 492 struct ib_mad_snoop_private *mad_snoop_priv; 502 493 int qpn; 494 + int err; 503 495 504 496 /* Validate parameters */ 505 497 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || ··· 535 525 mad_snoop_priv->agent.port_num = port_num; 536 526 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; 537 527 init_completion(&mad_snoop_priv->comp); 528 + 529 + err = 
ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); 530 + if (err) { 531 + ret = ERR_PTR(err); 532 + goto error2; 533 + } 534 + 538 535 mad_snoop_priv->snoop_index = register_snoop_agent( 539 536 &port_priv->qp_info[qpn], 540 537 mad_snoop_priv); 541 538 if (mad_snoop_priv->snoop_index < 0) { 542 539 ret = ERR_PTR(mad_snoop_priv->snoop_index); 543 - goto error2; 540 + goto error3; 544 541 } 545 542 546 543 atomic_set(&mad_snoop_priv->refcount, 1); 547 544 return &mad_snoop_priv->agent; 548 - 545 + error3: 546 + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); 549 547 error2: 550 548 kfree(mad_snoop_priv); 551 549 error1: ··· 599 581 deref_mad_agent(mad_agent_priv); 600 582 wait_for_completion(&mad_agent_priv->comp); 601 583 584 + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); 585 + 602 586 kfree(mad_agent_priv->reg_req); 603 587 kfree(mad_agent_priv); 604 588 } ··· 618 598 619 599 deref_snoop_agent(mad_snoop_priv); 620 600 wait_for_completion(&mad_snoop_priv->comp); 601 + 602 + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); 621 603 622 604 kfree(mad_snoop_priv); 623 605 } ··· 1237 1215 1238 1216 /* Walk list of send WRs and post each on send list */ 1239 1217 for (; send_buf; send_buf = next_send_buf) { 1240 - 1241 1218 mad_send_wr = container_of(send_buf, 1242 1219 struct ib_mad_send_wr_private, 1243 1220 send_buf); 1244 1221 mad_agent_priv = mad_send_wr->mad_agent_priv; 1222 + 1223 + ret = ib_mad_enforce_security(mad_agent_priv, 1224 + mad_send_wr->send_wr.pkey_index); 1225 + if (ret) 1226 + goto error; 1245 1227 1246 1228 if (!send_buf->mad_agent->send_handler || 1247 1229 (send_buf->timeout_ms && ··· 1972 1946 struct ib_mad_send_wr_private *mad_send_wr; 1973 1947 struct ib_mad_send_wc mad_send_wc; 1974 1948 unsigned long flags; 1949 + int ret; 1950 + 1951 + ret = ib_mad_enforce_security(mad_agent_priv, 1952 + mad_recv_wc->wc->pkey_index); 1953 + if (ret) { 1954 + ib_free_recv_mad(mad_recv_wc); 1955 + 
deref_mad_agent(mad_agent_priv); 1956 + } 1975 1957 1976 1958 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1977 1959 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); ··· 2037 2003 mad_recv_wc); 2038 2004 deref_mad_agent(mad_agent_priv); 2039 2005 } 2006 + 2007 + return; 2040 2008 } 2041 2009 2042 2010 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
+92
drivers/infiniband/core/security.c
··· 39 39 #include <rdma/ib_verbs.h> 40 40 #include <rdma/ib_cache.h> 41 41 #include "core_priv.h" 42 + #include "mad_priv.h" 42 43 43 44 static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) 44 45 { ··· 610 609 return ret; 611 610 } 612 611 EXPORT_SYMBOL(ib_security_modify_qp); 612 + 613 + int ib_security_pkey_access(struct ib_device *dev, 614 + u8 port_num, 615 + u16 pkey_index, 616 + void *sec) 617 + { 618 + u64 subnet_prefix; 619 + u16 pkey; 620 + int ret; 621 + 622 + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); 623 + if (ret) 624 + return ret; 625 + 626 + ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix); 627 + 628 + if (ret) 629 + return ret; 630 + 631 + return security_ib_pkey_access(sec, subnet_prefix, pkey); 632 + } 633 + EXPORT_SYMBOL(ib_security_pkey_access); 634 + 635 + static int ib_mad_agent_security_change(struct notifier_block *nb, 636 + unsigned long event, 637 + void *data) 638 + { 639 + struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb); 640 + 641 + if (event != LSM_POLICY_CHANGE) 642 + return NOTIFY_DONE; 643 + 644 + ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security, 645 + ag->device->name, 646 + ag->port_num); 647 + 648 + return NOTIFY_OK; 649 + } 650 + 651 + int ib_mad_agent_security_setup(struct ib_mad_agent *agent, 652 + enum ib_qp_type qp_type) 653 + { 654 + int ret; 655 + 656 + ret = security_ib_alloc_security(&agent->security); 657 + if (ret) 658 + return ret; 659 + 660 + if (qp_type != IB_QPT_SMI) 661 + return 0; 662 + 663 + ret = security_ib_endport_manage_subnet(agent->security, 664 + agent->device->name, 665 + agent->port_num); 666 + if (ret) 667 + return ret; 668 + 669 + agent->lsm_nb.notifier_call = ib_mad_agent_security_change; 670 + ret = register_lsm_notifier(&agent->lsm_nb); 671 + if (ret) 672 + return ret; 673 + 674 + agent->smp_allowed = true; 675 + agent->lsm_nb_reg = true; 676 + return 0; 677 + } 678 + 679 + void 
ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) 680 + { 681 + security_ib_free_security(agent->security); 682 + if (agent->lsm_nb_reg) 683 + unregister_lsm_notifier(&agent->lsm_nb); 684 + } 685 + 686 + int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) 687 + { 688 + int ret; 689 + 690 + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) 691 + return -EACCES; 692 + 693 + ret = ib_security_pkey_access(map->agent.device, 694 + map->agent.port_num, 695 + pkey_index, 696 + map->agent.security); 697 + 698 + if (ret) 699 + return ret; 700 + 701 + return 0; 702 + } 613 703 614 704 #endif /* CONFIG_SECURITY_INFINIBAND */
+8
include/linux/lsm_hooks.h
··· 919 919 * @subnet_prefix the subnet prefix of the port being used. 920 920 * @pkey the pkey to be accessed. 921 921 * @sec pointer to a security structure. 922 + * @ib_endport_manage_subnet: 923 + * Check permissions to send and receive SMPs on a end port. 924 + * @dev_name the IB device name (i.e. mlx4_0). 925 + * @port_num the port number. 926 + * @sec pointer to a security structure. 922 927 * @ib_alloc_security: 923 928 * Allocate a security structure for Infiniband objects. 924 929 * @sec pointer to a security structure pointer. ··· 1643 1638 1644 1639 #ifdef CONFIG_SECURITY_INFINIBAND 1645 1640 int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey); 1641 + int (*ib_endport_manage_subnet)(void *sec, const char *dev_name, 1642 + u8 port_num); 1646 1643 int (*ib_alloc_security)(void **sec); 1647 1644 void (*ib_free_security)(void *sec); 1648 1645 #endif /* CONFIG_SECURITY_INFINIBAND */ ··· 1882 1875 #endif /* CONFIG_SECURITY_NETWORK */ 1883 1876 #ifdef CONFIG_SECURITY_INFINIBAND 1884 1877 struct list_head ib_pkey_access; 1878 + struct list_head ib_endport_manage_subnet; 1885 1879 struct list_head ib_alloc_security; 1886 1880 struct list_head ib_free_security; 1887 1881 #endif /* CONFIG_SECURITY_INFINIBAND */
+6
include/linux/security.h
··· 1432 1432 1433 1433 #ifdef CONFIG_SECURITY_INFINIBAND 1434 1434 int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey); 1435 + int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num); 1435 1436 int security_ib_alloc_security(void **sec); 1436 1437 void security_ib_free_security(void *sec); 1437 1438 #else /* CONFIG_SECURITY_INFINIBAND */ 1438 1439 static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) 1440 + { 1441 + return 0; 1442 + } 1443 + 1444 + static inline int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) 1439 1445 { 1440 1446 return 0; 1441 1447 }
+4
include/rdma/ib_mad.h
··· 575 575 u32 flags; 576 576 u8 port_num; 577 577 u8 rmpp_version; 578 + void *security; 579 + bool smp_allowed; 580 + bool lsm_nb_reg; 581 + struct notifier_block lsm_nb; 578 582 }; 579 583 580 584 /**
+6
security/security.c
··· 1544 1544 } 1545 1545 EXPORT_SYMBOL(security_ib_pkey_access); 1546 1546 1547 + int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) 1548 + { 1549 + return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num); 1550 + } 1551 + EXPORT_SYMBOL(security_ib_endport_manage_subnet); 1552 + 1547 1553 int security_ib_alloc_security(void **sec) 1548 1554 { 1549 1555 return call_int_hook(ib_alloc_security, 0, sec);