Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlxsw-spectrum'

Jiri Pirko says:

====================
mlxsw: Driver update, add initial support for Spectrum ASIC

The purpose of this patchset is to introduce initial support for the Mellanox
Spectrum ASIC, including L2 bridge forwarding offload.

The only non-mlxsw patch in this patchset is the first one, introducing
a pre-change upper notifier. That is used in the last patch to ensure ports
of a single ASIC are not bridged into multiple bridges, as that scenario is
currently not supported by the driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+4604 -12
+11
drivers/net/ethernet/mellanox/mlxsw/Kconfig
··· 30 30 31 31 To compile this driver as a module, choose M here: the 32 32 module will be called mlxsw_switchx2. 33 + 34 + config MLXSW_SPECTRUM 35 + tristate "Mellanox Technologies Spectrum support" 36 + depends on MLXSW_CORE && NET_SWITCHDEV 37 + default m 38 + ---help--- 39 + This driver supports Mellanox Technologies Spectrum Ethernet 40 + Switch ASICs. 41 + 42 + To compile this driver as a module, choose M here: the 43 + module will be called mlxsw_spectrum.
+3
drivers/net/ethernet/mellanox/mlxsw/Makefile
··· 4 4 mlxsw_pci-objs := pci.o 5 5 obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o 6 6 mlxsw_switchx2-objs := switchx2.o 7 + obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o 8 + mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ 9 + spectrum_switchdev.o
+25
drivers/net/ethernet/mellanox/mlxsw/cmd.h
··· 674 674 */ 675 675 MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2); 676 676 677 + /* cmd_mbox_config_profile_max_fid_offset_flood_tables 678 + * Maximum number of FID-offset flooding tables. 679 + */ 680 + MLXSW_ITEM32(cmd_mbox, config_profile, 681 + max_fid_offset_flood_tables, 0x34, 24, 4); 682 + 683 + /* cmd_mbox_config_profile_fid_offset_flood_table_size 684 + * The size (number of entries) of each FID-offset flood table. 685 + */ 686 + MLXSW_ITEM32(cmd_mbox, config_profile, 687 + fid_offset_flood_table_size, 0x34, 0, 16); 688 + 689 + /* cmd_mbox_config_profile_max_fid_flood_tables 690 + * Maximum number of per-FID flooding tables. 691 + * 692 + * Note: This flooding tables cover special FIDs only (vFIDs), starting at 693 + * FID value 4K and higher. 694 + */ 695 + MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4); 696 + 697 + /* cmd_mbox_config_profile_fid_flood_table_size 698 + * The size (number of entries) of each per-FID table. 699 + */ 700 + MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16); 701 + 677 702 /* cmd_mbox_config_profile_max_ib_mc 678 703 * Maximum number of multicast FDB records for InfiniBand 679 704 * FDB (in 512 chunks) per InfiniBand switch partition.
+5
drivers/net/ethernet/mellanox/mlxsw/core.h
··· 54 54 MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind) 55 55 56 56 #define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2" 57 + #define MLXSW_DEVICE_KIND_SPECTRUM "spectrum" 57 58 58 59 struct mlxsw_core; 59 60 struct mlxsw_driver; ··· 154 153 u8 max_flood_tables; 155 154 u8 max_vid_flood_tables; 156 155 u8 flood_mode; 156 + u8 max_fid_offset_flood_tables; 157 + u16 fid_offset_flood_table_size; 158 + u8 max_fid_flood_tables; 159 + u16 fid_flood_table_size; 157 160 u16 max_ib_mc; 158 161 u16 max_pkey; 159 162 u8 ar_sec;
+42 -8
drivers/net/ethernet/mellanox/mlxsw/item.h
··· 171 171 } 172 172 173 173 static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, 174 - struct mlxsw_item *item) 174 + struct mlxsw_item *item, 175 + unsigned short index) 175 176 { 176 - memcpy(dst, &buf[item->offset], item->size.bytes); 177 + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); 178 + 179 + memcpy(dst, &buf[offset], item->size.bytes); 177 180 } 178 181 179 - static inline void __mlxsw_item_memcpy_to(char *buf, char *src, 180 - struct mlxsw_item *item) 182 + static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, 183 + struct mlxsw_item *item, 184 + unsigned short index) 181 185 { 182 - memcpy(&buf[item->offset], src, item->size.bytes); 186 + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); 187 + 188 + memcpy(&buf[offset], src, item->size.bytes); 183 189 } 184 190 185 191 static inline u16 ··· 377 371 static inline void \ 378 372 mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \ 379 373 { \ 380 - __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\ 374 + __mlxsw_item_memcpy_from(buf, dst, \ 375 + &__ITEM_NAME(_type, _cname, _iname), 0); \ 381 376 } \ 382 377 static inline void \ 383 - mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \ 378 + mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ 384 379 { \ 385 - __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \ 380 + __mlxsw_item_memcpy_to(buf, src, \ 381 + &__ITEM_NAME(_type, _cname, _iname), 0); \ 382 + } 383 + 384 + #define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ 385 + _step, _instepoffset) \ 386 + static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ 387 + .offset = _offset, \ 388 + .step = _step, \ 389 + .in_step_offset = _instepoffset, \ 390 + .size = {.bytes = _sizebytes,}, \ 391 + .name = #_type "_" #_cname "_" #_iname, \ 392 + }; \ 393 + static inline void \ 394 + 
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \ 395 + unsigned short index, \ 396 + char *dst) \ 397 + { \ 398 + __mlxsw_item_memcpy_from(buf, dst, \ 399 + &__ITEM_NAME(_type, _cname, _iname), index); \ 400 + } \ 401 + static inline void \ 402 + mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ 403 + unsigned short index, \ 404 + const char *src) \ 405 + { \ 406 + __mlxsw_item_memcpy_to(buf, src, \ 407 + &__ITEM_NAME(_type, _cname, _iname), index); \ 386 408 } 387 409 388 410 #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+11
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 57 57 58 58 static const struct pci_device_id mlxsw_pci_id_table[] = { 59 59 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, 60 + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 60 61 {0, } 61 62 }; 62 63 ··· 68 67 switch (id->device) { 69 68 case PCI_DEVICE_ID_MELLANOX_SWITCHX2: 70 69 return MLXSW_DEVICE_KIND_SWITCHX2; 70 + case PCI_DEVICE_ID_MELLANOX_SPECTRUM: 71 + return MLXSW_DEVICE_KIND_SPECTRUM; 71 72 default: 72 73 BUG(); 73 74 } ··· 1217 1214 mbox, profile->max_flood_tables); 1218 1215 mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set( 1219 1216 mbox, profile->max_vid_flood_tables); 1217 + mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set( 1218 + mbox, profile->max_fid_offset_flood_tables); 1219 + mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set( 1220 + mbox, profile->fid_offset_flood_table_size); 1221 + mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set( 1222 + mbox, profile->max_fid_flood_tables); 1223 + mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set( 1224 + mbox, profile->fid_flood_table_size); 1220 1225 } 1221 1226 if (profile->used_flood_mode) { 1222 1227 mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+1
drivers/net/ethernet/mellanox/mlxsw/pci.h
··· 40 40 #include "item.h" 41 41 42 42 #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 43 + #define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 43 44 #define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ 44 45 #define MLXSW_PCI_PAGE_SIZE 4096 45 46
+1139 -3
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 157 157 mlxsw_reg_sspr_system_port_set(payload, local_port); 158 158 } 159 159 160 + /* SFDAT - Switch Filtering Database Aging Time 161 + * -------------------------------------------- 162 + * Controls the Switch aging time. Aging time is able to be set per Switch 163 + * Partition. 164 + */ 165 + #define MLXSW_REG_SFDAT_ID 0x2009 166 + #define MLXSW_REG_SFDAT_LEN 0x8 167 + 168 + static const struct mlxsw_reg_info mlxsw_reg_sfdat = { 169 + .id = MLXSW_REG_SFDAT_ID, 170 + .len = MLXSW_REG_SFDAT_LEN, 171 + }; 172 + 173 + /* reg_sfdat_swid 174 + * Switch partition ID. 175 + * Access: Index 176 + */ 177 + MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8); 178 + 179 + /* reg_sfdat_age_time 180 + * Aging time in seconds 181 + * Min - 10 seconds 182 + * Max - 1,000,000 seconds 183 + * Default is 300 seconds. 184 + * Access: RW 185 + */ 186 + MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20); 187 + 188 + static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time) 189 + { 190 + MLXSW_REG_ZERO(sfdat, payload); 191 + mlxsw_reg_sfdat_swid_set(payload, 0); 192 + mlxsw_reg_sfdat_age_time_set(payload, age_time); 193 + } 194 + 195 + /* SFD - Switch Filtering Database 196 + * ------------------------------- 197 + * The following register defines the access to the filtering database. 198 + * The register supports querying, adding, removing and modifying the database. 199 + * The access is optimized for bulk updates in which case more than one 200 + * FDB record is present in the same command. 
201 + */ 202 + #define MLXSW_REG_SFD_ID 0x200A 203 + #define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */ 204 + #define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */ 205 + #define MLXSW_REG_SFD_REC_MAX_COUNT 64 206 + #define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \ 207 + MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT) 208 + 209 + static const struct mlxsw_reg_info mlxsw_reg_sfd = { 210 + .id = MLXSW_REG_SFD_ID, 211 + .len = MLXSW_REG_SFD_LEN, 212 + }; 213 + 214 + /* reg_sfd_swid 215 + * Switch partition ID for queries. Reserved on Write. 216 + * Access: Index 217 + */ 218 + MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8); 219 + 220 + enum mlxsw_reg_sfd_op { 221 + /* Dump entire FDB a (process according to record_locator) */ 222 + MLXSW_REG_SFD_OP_QUERY_DUMP = 0, 223 + /* Query records by {MAC, VID/FID} value */ 224 + MLXSW_REG_SFD_OP_QUERY_QUERY = 1, 225 + /* Query and clear activity. Query records by {MAC, VID/FID} value */ 226 + MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2, 227 + /* Test. Response indicates if each of the records could be 228 + * added to the FDB. 229 + */ 230 + MLXSW_REG_SFD_OP_WRITE_TEST = 0, 231 + /* Add/modify. Aged-out records cannot be added. This command removes 232 + * the learning notification of the {MAC, VID/FID}. Response includes 233 + * the entries that were added to the FDB. 234 + */ 235 + MLXSW_REG_SFD_OP_WRITE_EDIT = 1, 236 + /* Remove record by {MAC, VID/FID}. This command also removes 237 + * the learning notification and aged-out notifications 238 + * of the {MAC, VID/FID}. The response provides current (pre-removal) 239 + * entries as non-aged-out. 240 + */ 241 + MLXSW_REG_SFD_OP_WRITE_REMOVE = 2, 242 + /* Remove learned notification by {MAC, VID/FID}. The response provides 243 + * the removed learning notification. 244 + */ 245 + MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2, 246 + }; 247 + 248 + /* reg_sfd_op 249 + * Operation. 
250 + * Access: OP 251 + */ 252 + MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2); 253 + 254 + /* reg_sfd_record_locator 255 + * Used for querying the FDB. Use record_locator=0 to initiate the 256 + * query. When a record is returned, a new record_locator is 257 + * returned to be used in the subsequent query. 258 + * Reserved for database update. 259 + * Access: Index 260 + */ 261 + MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30); 262 + 263 + /* reg_sfd_num_rec 264 + * Request: Number of records to read/add/modify/remove 265 + * Response: Number of records read/added/replaced/removed 266 + * See above description for more details. 267 + * Ranges 0..64 268 + * Access: RW 269 + */ 270 + MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8); 271 + 272 + static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op, 273 + u32 record_locator) 274 + { 275 + MLXSW_REG_ZERO(sfd, payload); 276 + mlxsw_reg_sfd_op_set(payload, op); 277 + mlxsw_reg_sfd_record_locator_set(payload, record_locator); 278 + } 279 + 280 + /* reg_sfd_rec_swid 281 + * Switch partition ID. 282 + * Access: Index 283 + */ 284 + MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8, 285 + MLXSW_REG_SFD_REC_LEN, 0x00, false); 286 + 287 + enum mlxsw_reg_sfd_rec_type { 288 + MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, 289 + }; 290 + 291 + /* reg_sfd_rec_type 292 + * FDB record type. 293 + * Access: RW 294 + */ 295 + MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4, 296 + MLXSW_REG_SFD_REC_LEN, 0x00, false); 297 + 298 + enum mlxsw_reg_sfd_rec_policy { 299 + /* Replacement disabled, aging disabled. */ 300 + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0, 301 + /* (mlag remote): Replacement enabled, aging disabled, 302 + * learning notification enabled on this port. 303 + */ 304 + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1, 305 + /* (ingress device): Replacement enabled, aging enabled. 
*/ 306 + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3, 307 + }; 308 + 309 + /* reg_sfd_rec_policy 310 + * Policy. 311 + * Access: RW 312 + */ 313 + MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2, 314 + MLXSW_REG_SFD_REC_LEN, 0x00, false); 315 + 316 + /* reg_sfd_rec_a 317 + * Activity. Set for new static entries. Set for static entries if a frame SMAC 318 + * lookup hits on the entry. 319 + * To clear the a bit, use "query and clear activity" op. 320 + * Access: RO 321 + */ 322 + MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1, 323 + MLXSW_REG_SFD_REC_LEN, 0x00, false); 324 + 325 + /* reg_sfd_rec_mac 326 + * MAC address. 327 + * Access: Index 328 + */ 329 + MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6, 330 + MLXSW_REG_SFD_REC_LEN, 0x02); 331 + 332 + enum mlxsw_reg_sfd_rec_action { 333 + /* forward */ 334 + MLXSW_REG_SFD_REC_ACTION_NOP = 0, 335 + /* forward and trap, trap_id is FDB_TRAP */ 336 + MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, 337 + /* trap and do not forward, trap_id is FDB_TRAP */ 338 + MLXSW_REG_SFD_REC_ACTION_TRAP = 3, 339 + MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, 340 + }; 341 + 342 + /* reg_sfd_rec_action 343 + * Action to apply on the packet. 344 + * Note: Dynamic entries can only be configured with NOP action. 345 + * Access: RW 346 + */ 347 + MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4, 348 + MLXSW_REG_SFD_REC_LEN, 0x0C, false); 349 + 350 + /* reg_sfd_uc_sub_port 351 + * LAG sub port. 352 + * Must be 0 if multichannel VEPA is not enabled. 
353 + * Access: RW 354 + */ 355 + MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8, 356 + MLXSW_REG_SFD_REC_LEN, 0x08, false); 357 + 358 + /* reg_sfd_uc_fid_vid 359 + * Filtering ID or VLAN ID 360 + * For SwitchX and SwitchX-2: 361 + * - Dynamic entries (policy 2,3) use FID 362 + * - Static entries (policy 0) use VID 363 + * - When independent learning is configured, VID=FID 364 + * For Spectrum: use FID for both Dynamic and Static entries. 365 + * VID should not be used. 366 + * Access: Index 367 + */ 368 + MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, 369 + MLXSW_REG_SFD_REC_LEN, 0x08, false); 370 + 371 + /* reg_sfd_uc_system_port 372 + * Unique port identifier for the final destination of the packet. 373 + * Access: RW 374 + */ 375 + MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16, 376 + MLXSW_REG_SFD_REC_LEN, 0x0C, false); 377 + 378 + static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, 379 + enum mlxsw_reg_sfd_rec_policy policy, 380 + const char *mac, u16 vid, 381 + enum mlxsw_reg_sfd_rec_action action, 382 + u8 local_port) 383 + { 384 + u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload); 385 + 386 + if (rec_index >= num_rec) 387 + mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1); 388 + mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0); 389 + mlxsw_reg_sfd_rec_type_set(payload, rec_index, 390 + MLXSW_REG_SFD_REC_TYPE_UNICAST); 391 + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); 392 + mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac); 393 + mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); 394 + mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid); 395 + mlxsw_reg_sfd_rec_action_set(payload, rec_index, action); 396 + mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); 397 + } 398 + 399 + static inline void 400 + mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, 401 + char *mac, u16 *p_vid, 402 + u8 *p_local_port) 403 + { 
404 + mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); 405 + *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); 406 + *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); 407 + } 408 + 409 + /* SFN - Switch FDB Notification Register 410 + * ------------------------------------------- 411 + * The switch provides notifications on newly learned FDB entries and 412 + * aged out entries. The notifications can be polled by software. 413 + */ 414 + #define MLXSW_REG_SFN_ID 0x200B 415 + #define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */ 416 + #define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */ 417 + #define MLXSW_REG_SFN_REC_MAX_COUNT 64 418 + #define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \ 419 + MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT) 420 + 421 + static const struct mlxsw_reg_info mlxsw_reg_sfn = { 422 + .id = MLXSW_REG_SFN_ID, 423 + .len = MLXSW_REG_SFN_LEN, 424 + }; 425 + 426 + /* reg_sfn_swid 427 + * Switch partition ID. 428 + * Access: Index 429 + */ 430 + MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8); 431 + 432 + /* reg_sfn_num_rec 433 + * Request: Number of learned notifications and aged-out notification 434 + * records requested. 435 + * Response: Number of notification records returned (must be smaller 436 + * than or equal to the value requested) 437 + * Ranges 0..64 438 + * Access: OP 439 + */ 440 + MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8); 441 + 442 + static inline void mlxsw_reg_sfn_pack(char *payload) 443 + { 444 + MLXSW_REG_ZERO(sfn, payload); 445 + mlxsw_reg_sfn_swid_set(payload, 0); 446 + mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT); 447 + } 448 + 449 + /* reg_sfn_rec_swid 450 + * Switch partition ID. 451 + * Access: RO 452 + */ 453 + MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8, 454 + MLXSW_REG_SFN_REC_LEN, 0x00, false); 455 + 456 + enum mlxsw_reg_sfn_rec_type { 457 + /* MAC addresses learned on a regular port. 
*/ 458 + MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5, 459 + /* Aged-out MAC address on a regular port */ 460 + MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7, 461 + }; 462 + 463 + /* reg_sfn_rec_type 464 + * Notification record type. 465 + * Access: RO 466 + */ 467 + MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4, 468 + MLXSW_REG_SFN_REC_LEN, 0x00, false); 469 + 470 + /* reg_sfn_rec_mac 471 + * MAC address. 472 + * Access: RO 473 + */ 474 + MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6, 475 + MLXSW_REG_SFN_REC_LEN, 0x02); 476 + 477 + /* reg_sfd_mac_sub_port 478 + * VEPA channel on the local port. 479 + * 0 if multichannel VEPA is not enabled. 480 + * Access: RO 481 + */ 482 + MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8, 483 + MLXSW_REG_SFN_REC_LEN, 0x08, false); 484 + 485 + /* reg_sfd_mac_fid 486 + * Filtering identifier. 487 + * Access: RO 488 + */ 489 + MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16, 490 + MLXSW_REG_SFN_REC_LEN, 0x08, false); 491 + 492 + /* reg_sfd_mac_system_port 493 + * Unique port identifier for the final destination of the packet. 494 + * Access: RO 495 + */ 496 + MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16, 497 + MLXSW_REG_SFN_REC_LEN, 0x0C, false); 498 + 499 + static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, 500 + char *mac, u16 *p_vid, 501 + u8 *p_local_port) 502 + { 503 + mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); 504 + *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); 505 + *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index); 506 + } 507 + 160 508 /* SPMS - Switch Port MSTP/RSTP State Register 161 509 * ------------------------------------------- 162 510 * Configures the spanning tree state of a physical port. 
··· 550 202 enum mlxsw_reg_spms_state state) 551 203 { 552 204 mlxsw_reg_spms_state_set(payload, vid, state); 205 + } 206 + 207 + /* SPVID - Switch Port VID 208 + * ----------------------- 209 + * The switch port VID configures the default VID for a port. 210 + */ 211 + #define MLXSW_REG_SPVID_ID 0x200E 212 + #define MLXSW_REG_SPVID_LEN 0x08 213 + 214 + static const struct mlxsw_reg_info mlxsw_reg_spvid = { 215 + .id = MLXSW_REG_SPVID_ID, 216 + .len = MLXSW_REG_SPVID_LEN, 217 + }; 218 + 219 + /* reg_spvid_local_port 220 + * Local port number. 221 + * Access: Index 222 + */ 223 + MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); 224 + 225 + /* reg_spvid_sub_port 226 + * Virtual port within the physical port. 227 + * Should be set to 0 when virtual ports are not enabled on the port. 228 + * Access: Index 229 + */ 230 + MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); 231 + 232 + /* reg_spvid_pvid 233 + * Port default VID 234 + * Access: RW 235 + */ 236 + MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12); 237 + 238 + static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid) 239 + { 240 + MLXSW_REG_ZERO(spvid, payload); 241 + mlxsw_reg_spvid_local_port_set(payload, local_port); 242 + mlxsw_reg_spvid_pvid_set(payload, pvid); 243 + } 244 + 245 + /* SPVM - Switch Port VLAN Membership 246 + * ---------------------------------- 247 + * The Switch Port VLAN Membership register configures the VLAN membership 248 + * of a port in a VLAN denoted by VID. VLAN membership is managed per 249 + * virtual port. The register can be used to add and remove VID(s) from a port. 
250 + */ 251 + #define MLXSW_REG_SPVM_ID 0x200F 252 + #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */ 253 + #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */ 254 + #define MLXSW_REG_SPVM_REC_MAX_COUNT 256 255 + #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \ 256 + MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT) 257 + 258 + static const struct mlxsw_reg_info mlxsw_reg_spvm = { 259 + .id = MLXSW_REG_SPVM_ID, 260 + .len = MLXSW_REG_SPVM_LEN, 261 + }; 262 + 263 + /* reg_spvm_pt 264 + * Priority tagged. If this bit is set, packets forwarded to the port with 265 + * untagged VLAN membership (u bit is set) will be tagged with priority tag 266 + * (VID=0) 267 + * Access: RW 268 + */ 269 + MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1); 270 + 271 + /* reg_spvm_pte 272 + * Priority Tagged Update Enable. On Write operations, if this bit is cleared, 273 + * the pt bit will NOT be updated. To update the pt bit, pte must be set. 274 + * Access: WO 275 + */ 276 + MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1); 277 + 278 + /* reg_spvm_local_port 279 + * Local port number. 280 + * Access: Index 281 + */ 282 + MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8); 283 + 284 + /* reg_spvm_sub_port 285 + * Virtual port within the physical port. 286 + * Should be set to 0 when virtual ports are not enabled on the port. 287 + * Access: Index 288 + */ 289 + MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8); 290 + 291 + /* reg_spvm_num_rec 292 + * Number of records to update. Each record contains: i, e, u, vid. 293 + * Access: OP 294 + */ 295 + MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8); 296 + 297 + /* reg_spvm_rec_i 298 + * Ingress membership in VLAN ID. 299 + * Access: Index 300 + */ 301 + MLXSW_ITEM32_INDEXED(reg, spvm, rec_i, 302 + MLXSW_REG_SPVM_BASE_LEN, 14, 1, 303 + MLXSW_REG_SPVM_REC_LEN, 0, false); 304 + 305 + /* reg_spvm_rec_e 306 + * Egress membership in VLAN ID. 
307 + * Access: Index 308 + */ 309 + MLXSW_ITEM32_INDEXED(reg, spvm, rec_e, 310 + MLXSW_REG_SPVM_BASE_LEN, 13, 1, 311 + MLXSW_REG_SPVM_REC_LEN, 0, false); 312 + 313 + /* reg_spvm_rec_u 314 + * Untagged - port is an untagged member - egress transmission uses untagged 315 + * frames on VID<n> 316 + * Access: Index 317 + */ 318 + MLXSW_ITEM32_INDEXED(reg, spvm, rec_u, 319 + MLXSW_REG_SPVM_BASE_LEN, 12, 1, 320 + MLXSW_REG_SPVM_REC_LEN, 0, false); 321 + 322 + /* reg_spvm_rec_vid 323 + * Egress membership in VLAN ID. 324 + * Access: Index 325 + */ 326 + MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid, 327 + MLXSW_REG_SPVM_BASE_LEN, 0, 12, 328 + MLXSW_REG_SPVM_REC_LEN, 0, false); 329 + 330 + static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, 331 + u16 vid_begin, u16 vid_end, 332 + bool is_member, bool untagged) 333 + { 334 + int size = vid_end - vid_begin + 1; 335 + int i; 336 + 337 + MLXSW_REG_ZERO(spvm, payload); 338 + mlxsw_reg_spvm_local_port_set(payload, local_port); 339 + mlxsw_reg_spvm_num_rec_set(payload, size); 340 + 341 + for (i = 0; i < size; i++) { 342 + mlxsw_reg_spvm_rec_i_set(payload, i, is_member); 343 + mlxsw_reg_spvm_rec_e_set(payload, i, is_member); 344 + mlxsw_reg_spvm_rec_u_set(payload, i, untagged); 345 + mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i); 346 + } 553 347 } 554 348 555 349 /* SFGC - Switch Flooding Group Configuration ··· 853 363 unsigned int flood_table, 854 364 unsigned int index, 855 365 enum mlxsw_flood_table_type table_type, 856 - unsigned int range) 366 + unsigned int range, u8 port, bool set) 857 367 { 858 368 MLXSW_REG_ZERO(sftr, payload); 859 369 mlxsw_reg_sftr_swid_set(payload, 0); ··· 861 371 mlxsw_reg_sftr_index_set(payload, index); 862 372 mlxsw_reg_sftr_table_type_set(payload, table_type); 863 373 mlxsw_reg_sftr_range_set(payload, range); 864 - mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1); 865 - mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); 374 + mlxsw_reg_sftr_port_set(payload, 
port, set); 375 + mlxsw_reg_sftr_port_mask_set(payload, port, 1); 866 376 } 867 377 868 378 /* SPMLR - Switch Port MAC Learning Register ··· 916 426 mlxsw_reg_spmlr_local_port_set(payload, local_port); 917 427 mlxsw_reg_spmlr_sub_port_set(payload, 0); 918 428 mlxsw_reg_spmlr_learn_mode_set(payload, mode); 429 + } 430 + 431 + /* SVFA - Switch VID to FID Allocation Register 432 + * -------------------------------------------- 433 + * Controls the VID to FID mapping and {Port, VID} to FID mapping for 434 + * virtualized ports. 435 + */ 436 + #define MLXSW_REG_SVFA_ID 0x201C 437 + #define MLXSW_REG_SVFA_LEN 0x10 438 + 439 + static const struct mlxsw_reg_info mlxsw_reg_svfa = { 440 + .id = MLXSW_REG_SVFA_ID, 441 + .len = MLXSW_REG_SVFA_LEN, 442 + }; 443 + 444 + /* reg_svfa_swid 445 + * Switch partition ID. 446 + * Access: Index 447 + */ 448 + MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8); 449 + 450 + /* reg_svfa_local_port 451 + * Local port number. 452 + * Access: Index 453 + * 454 + * Note: Reserved for 802.1Q FIDs. 455 + */ 456 + MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8); 457 + 458 + enum mlxsw_reg_svfa_mt { 459 + MLXSW_REG_SVFA_MT_VID_TO_FID, 460 + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 461 + }; 462 + 463 + /* reg_svfa_mapping_table 464 + * Mapping table: 465 + * 0 - VID to FID 466 + * 1 - {Port, VID} to FID 467 + * Access: Index 468 + * 469 + * Note: Reserved for SwitchX-2. 470 + */ 471 + MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3); 472 + 473 + /* reg_svfa_v 474 + * Valid. 475 + * Valid if set. 476 + * Access: RW 477 + * 478 + * Note: Reserved for SwitchX-2. 479 + */ 480 + MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1); 481 + 482 + /* reg_svfa_fid 483 + * Filtering ID. 484 + * Access: RW 485 + */ 486 + MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16); 487 + 488 + /* reg_svfa_vid 489 + * VLAN ID. 490 + * Access: Index 491 + */ 492 + MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12); 493 + 494 + /* reg_svfa_counter_set_type 495 + * Counter set type for flow counters. 
496 + * Access: RW 497 + * 498 + * Note: Reserved for SwitchX-2. 499 + */ 500 + MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8); 501 + 502 + /* reg_svfa_counter_index 503 + * Counter index for flow counters. 504 + * Access: RW 505 + * 506 + * Note: Reserved for SwitchX-2. 507 + */ 508 + MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24); 509 + 510 + static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, 511 + enum mlxsw_reg_svfa_mt mt, bool valid, 512 + u16 fid, u16 vid) 513 + { 514 + MLXSW_REG_ZERO(svfa, payload); 515 + local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port; 516 + mlxsw_reg_svfa_swid_set(payload, 0); 517 + mlxsw_reg_svfa_local_port_set(payload, local_port); 518 + mlxsw_reg_svfa_mapping_table_set(payload, mt); 519 + mlxsw_reg_svfa_v_set(payload, valid); 520 + mlxsw_reg_svfa_fid_set(payload, fid); 521 + mlxsw_reg_svfa_vid_set(payload, vid); 522 + } 523 + 524 + /* SVPE - Switch Virtual-Port Enabling Register 525 + * -------------------------------------------- 526 + * Enables port virtualization. 527 + */ 528 + #define MLXSW_REG_SVPE_ID 0x201E 529 + #define MLXSW_REG_SVPE_LEN 0x4 530 + 531 + static const struct mlxsw_reg_info mlxsw_reg_svpe = { 532 + .id = MLXSW_REG_SVPE_ID, 533 + .len = MLXSW_REG_SVPE_LEN, 534 + }; 535 + 536 + /* reg_svpe_local_port 537 + * Local port number 538 + * Access: Index 539 + * 540 + * Note: CPU port is not supported (uses VLAN mode only). 541 + */ 542 + MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); 543 + 544 + /* reg_svpe_vp_en 545 + * Virtual port enable. 546 + * 0 - Disable, VLAN mode (VID to FID). 547 + * 1 - Enable, Virtual port mode ({Port, VID} to FID). 
548 + * Access: RW 549 + */ 550 + MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1); 551 + 552 + static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port, 553 + bool enable) 554 + { 555 + MLXSW_REG_ZERO(svpe, payload); 556 + mlxsw_reg_svpe_local_port_set(payload, local_port); 557 + mlxsw_reg_svpe_vp_en_set(payload, enable); 558 + } 559 + 560 + /* SFMR - Switch FID Management Register 561 + * ------------------------------------- 562 + * Creates and configures FIDs. 563 + */ 564 + #define MLXSW_REG_SFMR_ID 0x201F 565 + #define MLXSW_REG_SFMR_LEN 0x18 566 + 567 + static const struct mlxsw_reg_info mlxsw_reg_sfmr = { 568 + .id = MLXSW_REG_SFMR_ID, 569 + .len = MLXSW_REG_SFMR_LEN, 570 + }; 571 + 572 + enum mlxsw_reg_sfmr_op { 573 + MLXSW_REG_SFMR_OP_CREATE_FID, 574 + MLXSW_REG_SFMR_OP_DESTROY_FID, 575 + }; 576 + 577 + /* reg_sfmr_op 578 + * Operation. 579 + * 0 - Create or edit FID. 580 + * 1 - Destroy FID. 581 + * Access: WO 582 + */ 583 + MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4); 584 + 585 + /* reg_sfmr_fid 586 + * Filtering ID. 587 + * Access: Index 588 + */ 589 + MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16); 590 + 591 + /* reg_sfmr_fid_offset 592 + * FID offset. 593 + * Used to point into the flooding table selected by SFGC register if 594 + * the table is of type FID-Offset. Otherwise, this field is reserved. 595 + * Access: RW 596 + */ 597 + MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16); 598 + 599 + /* reg_sfmr_vtfp 600 + * Valid Tunnel Flood Pointer. 601 + * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL. 602 + * Access: RW 603 + * 604 + * Note: Reserved for 802.1Q FIDs. 605 + */ 606 + MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1); 607 + 608 + /* reg_sfmr_nve_tunnel_flood_ptr 609 + * Underlay Flooding and BC Pointer. 610 + * Used as a pointer to the first entry of the group based link lists of 611 + * flooding or BC entries (for NVE tunnels). 
612 + * Access: RW 613 + */ 614 + MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24); 615 + 616 + /* reg_sfmr_vv 617 + * VNI Valid. 618 + * If not set, then vni is reserved. 619 + * Access: RW 620 + * 621 + * Note: Reserved for 802.1Q FIDs. 622 + */ 623 + MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1); 624 + 625 + /* reg_sfmr_vni 626 + * Virtual Network Identifier. 627 + * Access: RW 628 + * 629 + * Note: A given VNI can only be assigned to one FID. 630 + */ 631 + MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24); 632 + 633 + static inline void mlxsw_reg_sfmr_pack(char *payload, 634 + enum mlxsw_reg_sfmr_op op, u16 fid, 635 + u16 fid_offset) 636 + { 637 + MLXSW_REG_ZERO(sfmr, payload); 638 + mlxsw_reg_sfmr_op_set(payload, op); 639 + mlxsw_reg_sfmr_fid_set(payload, fid); 640 + mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset); 641 + mlxsw_reg_sfmr_vtfp_set(payload, false); 642 + mlxsw_reg_sfmr_vv_set(payload, false); 643 + } 644 + 645 + /* SPVMLR - Switch Port VLAN MAC Learning Register 646 + * ----------------------------------------------- 647 + * Controls the switch MAC learning policy per {Port, VID}. 648 + */ 649 + #define MLXSW_REG_SPVMLR_ID 0x2020 650 + #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */ 651 + #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */ 652 + #define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256 653 + #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \ 654 + MLXSW_REG_SPVMLR_REC_LEN * \ 655 + MLXSW_REG_SPVMLR_REC_MAX_COUNT) 656 + 657 + static const struct mlxsw_reg_info mlxsw_reg_spvmlr = { 658 + .id = MLXSW_REG_SPVMLR_ID, 659 + .len = MLXSW_REG_SPVMLR_LEN, 660 + }; 661 + 662 + /* reg_spvmlr_local_port 663 + * Local ingress port. 664 + * Access: Index 665 + * 666 + * Note: CPU port is not supported. 667 + */ 668 + MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8); 669 + 670 + /* reg_spvmlr_num_rec 671 + * Number of records to update. 
672 + * Access: OP 673 + */ 674 + MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8); 675 + 676 + /* reg_spvmlr_rec_learn_enable 677 + * 0 - Disable learning for {Port, VID}. 678 + * 1 - Enable learning for {Port, VID}. 679 + * Access: RW 680 + */ 681 + MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN, 682 + 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); 683 + 684 + /* reg_spvmlr_rec_vid 685 + * VLAN ID to be added/removed from port or for querying. 686 + * Access: Index 687 + */ 688 + MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12, 689 + MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); 690 + 691 + static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, 692 + u16 vid_begin, u16 vid_end, 693 + bool learn_enable) 694 + { 695 + int num_rec = vid_end - vid_begin + 1; 696 + int i; 697 + 698 + WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT); 699 + 700 + MLXSW_REG_ZERO(spvmlr, payload); 701 + mlxsw_reg_spvmlr_local_port_set(payload, local_port); 702 + mlxsw_reg_spvmlr_num_rec_set(payload, num_rec); 703 + 704 + for (i = 0; i < num_rec; i++) { 705 + mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable); 706 + mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i); 707 + } 919 708 } 920 709 921 710 /* PMLP - Ports Module to Local Port Register ··· 1732 963 mlxsw_reg_ppcnt_prio_tc_set(payload, 0); 1733 964 } 1734 965 966 + /* PBMC - Port Buffer Management Control Register 967 + * ---------------------------------------------- 968 + * The PBMC register configures and retrieves the port packet buffer 969 + * allocation for different Prios, and the Pause threshold management. 970 + */ 971 + #define MLXSW_REG_PBMC_ID 0x500C 972 + #define MLXSW_REG_PBMC_LEN 0x68 973 + 974 + static const struct mlxsw_reg_info mlxsw_reg_pbmc = { 975 + .id = MLXSW_REG_PBMC_ID, 976 + .len = MLXSW_REG_PBMC_LEN, 977 + }; 978 + 979 + /* reg_pbmc_local_port 980 + * Local port number. 
981 + * Access: Index 982 + */ 983 + MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8); 984 + 985 + /* reg_pbmc_xoff_timer_value 986 + * When device generates a pause frame, it uses this value as the pause 987 + * timer (time for the peer port to pause in quota-512 bit time). 988 + * Access: RW 989 + */ 990 + MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16); 991 + 992 + /* reg_pbmc_xoff_refresh 993 + * The time before a new pause frame should be sent to refresh the pause RW 994 + * state. Using the same units as xoff_timer_value above (in quota-512 bit 995 + * time). 996 + * Access: RW 997 + */ 998 + MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16); 999 + 1000 + /* reg_pbmc_buf_lossy 1001 + * The field indicates if the buffer is lossy. 1002 + * 0 - Lossless 1003 + * 1 - Lossy 1004 + * Access: RW 1005 + */ 1006 + MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false); 1007 + 1008 + /* reg_pbmc_buf_epsb 1009 + * Eligible for Port Shared buffer. 1010 + * If epsb is set, packets assigned to buffer are allowed to insert the port 1011 + * shared buffer. 1012 + * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved. 1013 + * Access: RW 1014 + */ 1015 + MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false); 1016 + 1017 + /* reg_pbmc_buf_size 1018 + * The part of the packet buffer array is allocated for the specific buffer. 1019 + * Units are represented in cells. 
1020 + * Access: RW 1021 + */ 1022 + MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false); 1023 + 1024 + static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, 1025 + u16 xoff_timer_value, u16 xoff_refresh) 1026 + { 1027 + MLXSW_REG_ZERO(pbmc, payload); 1028 + mlxsw_reg_pbmc_local_port_set(payload, local_port); 1029 + mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value); 1030 + mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh); 1031 + } 1032 + 1033 + static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload, 1034 + int buf_index, 1035 + u16 size) 1036 + { 1037 + mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1); 1038 + mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0); 1039 + mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); 1040 + } 1041 + 1735 1042 /* PSPA - Port Switch Partition Allocation 1736 1043 * --------------------------------------- 1737 1044 * Controls the association of a port with a switch partition and enables ··· 2087 1242 mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); 2088 1243 } 2089 1244 1245 + /* SBPR - Shared Buffer Pools Register 1246 + * ----------------------------------- 1247 + * The SBPR configures and retrieves the shared buffer pools and configuration. 1248 + */ 1249 + #define MLXSW_REG_SBPR_ID 0xB001 1250 + #define MLXSW_REG_SBPR_LEN 0x14 1251 + 1252 + static const struct mlxsw_reg_info mlxsw_reg_sbpr = { 1253 + .id = MLXSW_REG_SBPR_ID, 1254 + .len = MLXSW_REG_SBPR_LEN, 1255 + }; 1256 + 1257 + enum mlxsw_reg_sbpr_dir { 1258 + MLXSW_REG_SBPR_DIR_INGRESS, 1259 + MLXSW_REG_SBPR_DIR_EGRESS, 1260 + }; 1261 + 1262 + /* reg_sbpr_dir 1263 + * Direction. 1264 + * Access: Index 1265 + */ 1266 + MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2); 1267 + 1268 + /* reg_sbpr_pool 1269 + * Pool index. 1270 + * Access: Index 1271 + */ 1272 + MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4); 1273 + 1274 + /* reg_sbpr_size 1275 + * Pool size in buffer cells. 
1276 + * Access: RW 1277 + */ 1278 + MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24); 1279 + 1280 + enum mlxsw_reg_sbpr_mode { 1281 + MLXSW_REG_SBPR_MODE_STATIC, 1282 + MLXSW_REG_SBPR_MODE_DYNAMIC, 1283 + }; 1284 + 1285 + /* reg_sbpr_mode 1286 + * Pool quota calculation mode. 1287 + * Access: RW 1288 + */ 1289 + MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); 1290 + 1291 + static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, 1292 + enum mlxsw_reg_sbpr_dir dir, 1293 + enum mlxsw_reg_sbpr_mode mode, u32 size) 1294 + { 1295 + MLXSW_REG_ZERO(sbpr, payload); 1296 + mlxsw_reg_sbpr_pool_set(payload, pool); 1297 + mlxsw_reg_sbpr_dir_set(payload, dir); 1298 + mlxsw_reg_sbpr_mode_set(payload, mode); 1299 + mlxsw_reg_sbpr_size_set(payload, size); 1300 + } 1301 + 1302 + /* SBCM - Shared Buffer Class Management Register 1303 + * ---------------------------------------------- 1304 + * The SBCM register configures and retrieves the shared buffer allocation 1305 + * and configuration according to Port-PG, including the binding to pool 1306 + * and definition of the associated quota. 1307 + */ 1308 + #define MLXSW_REG_SBCM_ID 0xB002 1309 + #define MLXSW_REG_SBCM_LEN 0x28 1310 + 1311 + static const struct mlxsw_reg_info mlxsw_reg_sbcm = { 1312 + .id = MLXSW_REG_SBCM_ID, 1313 + .len = MLXSW_REG_SBCM_LEN, 1314 + }; 1315 + 1316 + /* reg_sbcm_local_port 1317 + * Local port number. 1318 + * For Ingress: excludes CPU port and Router port 1319 + * For Egress: excludes IP Router 1320 + * Access: Index 1321 + */ 1322 + MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); 1323 + 1324 + /* reg_sbcm_pg_buff 1325 + * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress) 1326 + * For PG buffer: range is 0..cap_max_pg_buffers - 1 1327 + * For traffic class: range is 0..cap_max_tclass - 1 1328 + * Note that when traffic class is in MC aware mode then the traffic 1329 + * classes which are MC aware cannot be configured. 
1330 + * Access: Index 1331 + */ 1332 + MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6); 1333 + 1334 + enum mlxsw_reg_sbcm_dir { 1335 + MLXSW_REG_SBCM_DIR_INGRESS, 1336 + MLXSW_REG_SBCM_DIR_EGRESS, 1337 + }; 1338 + 1339 + /* reg_sbcm_dir 1340 + * Direction. 1341 + * Access: Index 1342 + */ 1343 + MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2); 1344 + 1345 + /* reg_sbcm_min_buff 1346 + * Minimum buffer size for the limiter, in cells. 1347 + * Access: RW 1348 + */ 1349 + MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); 1350 + 1351 + /* reg_sbcm_max_buff 1352 + * When the pool associated to the port-pg/tclass is configured to 1353 + * static, Maximum buffer size for the limiter configured in cells. 1354 + * When the pool associated to the port-pg/tclass is configured to 1355 + * dynamic, the max_buff holds the "alpha" parameter, supporting 1356 + * the following values: 1357 + * 0: 0 1358 + * i: (1/128)*2^(i-1), for i=1..14 1359 + * 0xFF: Infinity 1360 + * Access: RW 1361 + */ 1362 + MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); 1363 + 1364 + /* reg_sbcm_pool 1365 + * Association of the port-priority to a pool. 
1366 + * Access: RW 1367 + */ 1368 + MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); 1369 + 1370 + static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, 1371 + enum mlxsw_reg_sbcm_dir dir, 1372 + u32 min_buff, u32 max_buff, u8 pool) 1373 + { 1374 + MLXSW_REG_ZERO(sbcm, payload); 1375 + mlxsw_reg_sbcm_local_port_set(payload, local_port); 1376 + mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff); 1377 + mlxsw_reg_sbcm_dir_set(payload, dir); 1378 + mlxsw_reg_sbcm_min_buff_set(payload, min_buff); 1379 + mlxsw_reg_sbcm_max_buff_set(payload, max_buff); 1380 + mlxsw_reg_sbcm_pool_set(payload, pool); 1381 + } 1382 + 1383 + /* SBPM - Shared Buffer Class Management Register 1384 + * ---------------------------------------------- 1385 + * The SBPM register configures and retrieves the shared buffer allocation 1386 + * and configuration according to Port-Pool, including the definition 1387 + * of the associated quota. 1388 + */ 1389 + #define MLXSW_REG_SBPM_ID 0xB003 1390 + #define MLXSW_REG_SBPM_LEN 0x28 1391 + 1392 + static const struct mlxsw_reg_info mlxsw_reg_sbpm = { 1393 + .id = MLXSW_REG_SBPM_ID, 1394 + .len = MLXSW_REG_SBPM_LEN, 1395 + }; 1396 + 1397 + /* reg_sbpm_local_port 1398 + * Local port number. 1399 + * For Ingress: excludes CPU port and Router port 1400 + * For Egress: excludes IP Router 1401 + * Access: Index 1402 + */ 1403 + MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); 1404 + 1405 + /* reg_sbpm_pool 1406 + * The pool associated to quota counting on the local_port. 1407 + * Access: Index 1408 + */ 1409 + MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); 1410 + 1411 + enum mlxsw_reg_sbpm_dir { 1412 + MLXSW_REG_SBPM_DIR_INGRESS, 1413 + MLXSW_REG_SBPM_DIR_EGRESS, 1414 + }; 1415 + 1416 + /* reg_sbpm_dir 1417 + * Direction. 1418 + * Access: Index 1419 + */ 1420 + MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2); 1421 + 1422 + /* reg_sbpm_min_buff 1423 + * Minimum buffer size for the limiter, in cells. 
1424 + * Access: RW 1425 + */ 1426 + MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); 1427 + 1428 + /* reg_sbpm_max_buff 1429 + * When the pool associated to the port-pg/tclass is configured to 1430 + * static, Maximum buffer size for the limiter configured in cells. 1431 + * When the pool associated to the port-pg/tclass is configured to 1432 + * dynamic, the max_buff holds the "alpha" parameter, supporting 1433 + * the following values: 1434 + * 0: 0 1435 + * i: (1/128)*2^(i-1), for i=1..14 1436 + * 0xFF: Infinity 1437 + * Access: RW 1438 + */ 1439 + MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); 1440 + 1441 + static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, 1442 + enum mlxsw_reg_sbpm_dir dir, 1443 + u32 min_buff, u32 max_buff) 1444 + { 1445 + MLXSW_REG_ZERO(sbpm, payload); 1446 + mlxsw_reg_sbpm_local_port_set(payload, local_port); 1447 + mlxsw_reg_sbpm_pool_set(payload, pool); 1448 + mlxsw_reg_sbpm_dir_set(payload, dir); 1449 + mlxsw_reg_sbpm_min_buff_set(payload, min_buff); 1450 + mlxsw_reg_sbpm_max_buff_set(payload, max_buff); 1451 + } 1452 + 1453 + /* SBMM - Shared Buffer Multicast Management Register 1454 + * -------------------------------------------------- 1455 + * The SBMM register configures and retrieves the shared buffer allocation 1456 + * and configuration for MC packets according to Switch-Priority, including 1457 + * the binding to pool and definition of the associated quota. 1458 + */ 1459 + #define MLXSW_REG_SBMM_ID 0xB004 1460 + #define MLXSW_REG_SBMM_LEN 0x28 1461 + 1462 + static const struct mlxsw_reg_info mlxsw_reg_sbmm = { 1463 + .id = MLXSW_REG_SBMM_ID, 1464 + .len = MLXSW_REG_SBMM_LEN, 1465 + }; 1466 + 1467 + /* reg_sbmm_prio 1468 + * Switch Priority. 1469 + * Access: Index 1470 + */ 1471 + MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4); 1472 + 1473 + /* reg_sbmm_min_buff 1474 + * Minimum buffer size for the limiter, in cells. 
1475 + * Access: RW 1476 + */ 1477 + MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24); 1478 + 1479 + /* reg_sbmm_max_buff 1480 + * When the pool associated to the port-pg/tclass is configured to 1481 + * static, Maximum buffer size for the limiter configured in cells. 1482 + * When the pool associated to the port-pg/tclass is configured to 1483 + * dynamic, the max_buff holds the "alpha" parameter, supporting 1484 + * the following values: 1485 + * 0: 0 1486 + * i: (1/128)*2^(i-1), for i=1..14 1487 + * 0xFF: Infinity 1488 + * Access: RW 1489 + */ 1490 + MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24); 1491 + 1492 + /* reg_sbmm_pool 1493 + * Association of the port-priority to a pool. 1494 + * Access: RW 1495 + */ 1496 + MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4); 1497 + 1498 + static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff, 1499 + u32 max_buff, u8 pool) 1500 + { 1501 + MLXSW_REG_ZERO(sbmm, payload); 1502 + mlxsw_reg_sbmm_prio_set(payload, prio); 1503 + mlxsw_reg_sbmm_min_buff_set(payload, min_buff); 1504 + mlxsw_reg_sbmm_max_buff_set(payload, max_buff); 1505 + mlxsw_reg_sbmm_pool_set(payload, pool); 1506 + } 1507 + 2090 1508 static inline const char *mlxsw_reg_id_str(u16 reg_id) 2091 1509 { 2092 1510 switch (reg_id) { ··· 2359 1251 return "SPAD"; 2360 1252 case MLXSW_REG_SSPR_ID: 2361 1253 return "SSPR"; 1254 + case MLXSW_REG_SFDAT_ID: 1255 + return "SFDAT"; 1256 + case MLXSW_REG_SFD_ID: 1257 + return "SFD"; 1258 + case MLXSW_REG_SFN_ID: 1259 + return "SFN"; 2362 1260 case MLXSW_REG_SPMS_ID: 2363 1261 return "SPMS"; 1262 + case MLXSW_REG_SPVID_ID: 1263 + return "SPVID"; 1264 + case MLXSW_REG_SPVM_ID: 1265 + return "SPVM"; 2364 1266 case MLXSW_REG_SFGC_ID: 2365 1267 return "SFGC"; 2366 1268 case MLXSW_REG_SFTR_ID: 2367 1269 return "SFTR"; 2368 1270 case MLXSW_REG_SPMLR_ID: 2369 1271 return "SPMLR"; 1272 + case MLXSW_REG_SVFA_ID: 1273 + return "SVFA"; 1274 + case MLXSW_REG_SVPE_ID: 1275 + return "SVPE"; 1276 + case MLXSW_REG_SFMR_ID: 1277 + 
return "SFMR"; 1278 + case MLXSW_REG_SPVMLR_ID: 1279 + return "SPVMLR"; 2370 1280 case MLXSW_REG_PMLP_ID: 2371 1281 return "PMLP"; 2372 1282 case MLXSW_REG_PMTU_ID: ··· 2397 1271 return "PAOS"; 2398 1272 case MLXSW_REG_PPCNT_ID: 2399 1273 return "PPCNT"; 1274 + case MLXSW_REG_PBMC_ID: 1275 + return "PBMC"; 2400 1276 case MLXSW_REG_PSPA_ID: 2401 1277 return "PSPA"; 2402 1278 case MLXSW_REG_HTGT_ID: 2403 1279 return "HTGT"; 2404 1280 case MLXSW_REG_HPKT_ID: 2405 1281 return "HPKT"; 1282 + case MLXSW_REG_SBPR_ID: 1283 + return "SBPR"; 1284 + case MLXSW_REG_SBCM_ID: 1285 + return "SBCM"; 1286 + case MLXSW_REG_SBPM_ID: 1287 + return "SBPM"; 1288 + case MLXSW_REG_SBMM_ID: 1289 + return "SBMM"; 2406 1290 default: 2407 1291 return "*UNKNOWN*"; 2408 1292 }
+1948
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum.c 3 + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 5 + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 + * 8 + * Redistribution and use in source and binary forms, with or without 9 + * modification, are permitted provided that the following conditions are met: 10 + * 11 + * 1. Redistributions of source code must retain the above copyright 12 + * notice, this list of conditions and the following disclaimer. 13 + * 2. Redistributions in binary form must reproduce the above copyright 14 + * notice, this list of conditions and the following disclaimer in the 15 + * documentation and/or other materials provided with the distribution. 16 + * 3. Neither the names of the copyright holders nor the names of its 17 + * contributors may be used to endorse or promote products derived from 18 + * this software without specific prior written permission. 19 + * 20 + * Alternatively, this software may be distributed under the terms of the 21 + * GNU General Public License ("GPL") version 2 as published by the Free 22 + * Software Foundation. 23 + * 24 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 + * POSSIBILITY OF SUCH DAMAGE. 35 + */ 36 + 37 + #include <linux/kernel.h> 38 + #include <linux/module.h> 39 + #include <linux/types.h> 40 + #include <linux/netdevice.h> 41 + #include <linux/etherdevice.h> 42 + #include <linux/ethtool.h> 43 + #include <linux/slab.h> 44 + #include <linux/device.h> 45 + #include <linux/skbuff.h> 46 + #include <linux/if_vlan.h> 47 + #include <linux/if_bridge.h> 48 + #include <linux/workqueue.h> 49 + #include <linux/jiffies.h> 50 + #include <linux/bitops.h> 51 + #include <net/switchdev.h> 52 + #include <generated/utsrelease.h> 53 + 54 + #include "spectrum.h" 55 + #include "core.h" 56 + #include "reg.h" 57 + #include "port.h" 58 + #include "trap.h" 59 + #include "txheader.h" 60 + 61 + static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; 62 + static const char mlxsw_sp_driver_version[] = "1.0"; 63 + 64 + /* tx_hdr_version 65 + * Tx header version. 66 + * Must be set to 1. 67 + */ 68 + MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 69 + 70 + /* tx_hdr_ctl 71 + * Packet control type. 72 + * 0 - Ethernet control (e.g. EMADs, LACP) 73 + * 1 - Ethernet data 74 + */ 75 + MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 76 + 77 + /* tx_hdr_proto 78 + * Packet protocol type. Must be set to 1 (Ethernet). 79 + */ 80 + MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 81 + 82 + /* tx_hdr_rx_is_router 83 + * Packet is sent from the router. Valid for data packets only. 
84 + */ 85 + MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 86 + 87 + /* tx_hdr_fid_valid 88 + * Indicates if the 'fid' field is valid and should be used for 89 + * forwarding lookup. Valid for data packets only. 90 + */ 91 + MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 92 + 93 + /* tx_hdr_swid 94 + * Switch partition ID. Must be set to 0. 95 + */ 96 + MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 97 + 98 + /* tx_hdr_control_tclass 99 + * Indicates if the packet should use the control TClass and not one 100 + * of the data TClasses. 101 + */ 102 + MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 103 + 104 + /* tx_hdr_etclass 105 + * Egress TClass to be used on the egress device on the egress port. 106 + */ 107 + MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 108 + 109 + /* tx_hdr_port_mid 110 + * Destination local port for unicast packets. 111 + * Destination multicast ID for multicast packets. 112 + * 113 + * Control packets are directed to a specific egress port, while data 114 + * packets are transmitted through the CPU port (0) into the switch partition, 115 + * where forwarding rules are applied. 116 + */ 117 + MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 118 + 119 + /* tx_hdr_fid 120 + * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 121 + * set, otherwise calculated based on the packet's VID using VID to FID mapping. 122 + * Valid for data packets only. 
123 + */ 124 + MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 125 + 126 + /* tx_hdr_type 127 + * 0 - Data packets 128 + * 6 - Control packets 129 + */ 130 + MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 131 + 132 + static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 133 + const struct mlxsw_tx_info *tx_info) 134 + { 135 + char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 136 + 137 + memset(txhdr, 0, MLXSW_TXHDR_LEN); 138 + 139 + mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 140 + mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 141 + mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 142 + mlxsw_tx_hdr_swid_set(txhdr, 0); 143 + mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 144 + mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 145 + mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 146 + } 147 + 148 + static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 149 + { 150 + char spad_pl[MLXSW_REG_SPAD_LEN]; 151 + int err; 152 + 153 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 154 + if (err) 155 + return err; 156 + mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 157 + return 0; 158 + } 159 + 160 + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 161 + bool is_up) 162 + { 163 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 164 + char paos_pl[MLXSW_REG_PAOS_LEN]; 165 + 166 + mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 167 + is_up ? 
MLXSW_PORT_ADMIN_STATUS_UP : 168 + MLXSW_PORT_ADMIN_STATUS_DOWN); 169 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 170 + } 171 + 172 + static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, 173 + bool *p_is_up) 174 + { 175 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 176 + char paos_pl[MLXSW_REG_PAOS_LEN]; 177 + u8 oper_status; 178 + int err; 179 + 180 + mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0); 181 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 182 + if (err) 183 + return err; 184 + oper_status = mlxsw_reg_paos_oper_status_get(paos_pl); 185 + *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false; 186 + return 0; 187 + } 188 + 189 + static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) 190 + { 191 + char sfmr_pl[MLXSW_REG_SFMR_LEN]; 192 + int err; 193 + 194 + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, 195 + MLXSW_SP_VFID_BASE + vfid, 0); 196 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 197 + 198 + if (err) 199 + return err; 200 + 201 + set_bit(vfid, mlxsw_sp->active_vfids); 202 + return 0; 203 + } 204 + 205 + static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) 206 + { 207 + char sfmr_pl[MLXSW_REG_SFMR_LEN]; 208 + 209 + clear_bit(vfid, mlxsw_sp->active_vfids); 210 + 211 + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, 212 + MLXSW_SP_VFID_BASE + vfid, 0); 213 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 214 + } 215 + 216 + static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 217 + unsigned char *addr) 218 + { 219 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 220 + char ppad_pl[MLXSW_REG_PPAD_LEN]; 221 + 222 + mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 223 + mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 224 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 225 + } 226 + 227 + static int 
mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 228 + { 229 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 230 + unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 231 + 232 + ether_addr_copy(addr, mlxsw_sp->base_mac); 233 + addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 234 + return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 235 + } 236 + 237 + static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, 238 + u16 vid, enum mlxsw_reg_spms_state state) 239 + { 240 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 241 + char *spms_pl; 242 + int err; 243 + 244 + spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 245 + if (!spms_pl) 246 + return -ENOMEM; 247 + mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 248 + mlxsw_reg_spms_vid_pack(spms_pl, vid, state); 249 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 250 + kfree(spms_pl); 251 + return err; 252 + } 253 + 254 + static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 255 + { 256 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 257 + char pmtu_pl[MLXSW_REG_PMTU_LEN]; 258 + int max_mtu; 259 + int err; 260 + 261 + mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 262 + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 263 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 264 + if (err) 265 + return err; 266 + max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 267 + 268 + if (mtu > max_mtu) 269 + return -EINVAL; 270 + 271 + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 272 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 273 + } 274 + 275 + static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 276 + { 277 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 278 + char pspa_pl[MLXSW_REG_PSPA_LEN]; 279 + 280 + mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 281 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), 
pspa_pl); 282 + } 283 + 284 + static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 285 + bool enable) 286 + { 287 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 288 + char svpe_pl[MLXSW_REG_SVPE_LEN]; 289 + 290 + mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 291 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 292 + } 293 + 294 + int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, 295 + enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, 296 + u16 vid) 297 + { 298 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 299 + char svfa_pl[MLXSW_REG_SVFA_LEN]; 300 + 301 + mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid, 302 + fid, vid); 303 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); 304 + } 305 + 306 + static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 307 + u16 vid, bool learn_enable) 308 + { 309 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 310 + char *spvmlr_pl; 311 + int err; 312 + 313 + spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 314 + if (!spvmlr_pl) 315 + return -ENOMEM; 316 + mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 317 + learn_enable); 318 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 319 + kfree(spvmlr_pl); 320 + return err; 321 + } 322 + 323 + static int 324 + mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 325 + { 326 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 327 + char sspr_pl[MLXSW_REG_SSPR_LEN]; 328 + 329 + mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 330 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 331 + } 332 + 333 + static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, 334 + bool *p_usable) 335 + { 336 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 337 + char pmlp_pl[MLXSW_REG_PMLP_LEN]; 338 + int err; 339 + 340 + 
mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 341 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 342 + if (err) 343 + return err; 344 + *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false; 345 + return 0; 346 + } 347 + 348 + static int mlxsw_sp_port_open(struct net_device *dev) 349 + { 350 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 351 + int err; 352 + 353 + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 354 + if (err) 355 + return err; 356 + netif_start_queue(dev); 357 + return 0; 358 + } 359 + 360 + static int mlxsw_sp_port_stop(struct net_device *dev) 361 + { 362 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 363 + 364 + netif_stop_queue(dev); 365 + return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 366 + } 367 + 368 + static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 369 + struct net_device *dev) 370 + { 371 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 372 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 373 + struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 374 + const struct mlxsw_tx_info tx_info = { 375 + .local_port = mlxsw_sp_port->local_port, 376 + .is_emad = false, 377 + }; 378 + u64 len; 379 + int err; 380 + 381 + if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) 382 + return NETDEV_TX_BUSY; 383 + 384 + if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 385 + struct sk_buff *skb_orig = skb; 386 + 387 + skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 388 + if (!skb) { 389 + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 390 + dev_kfree_skb_any(skb_orig); 391 + return NETDEV_TX_OK; 392 + } 393 + } 394 + 395 + if (eth_skb_pad(skb)) { 396 + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 397 + return NETDEV_TX_OK; 398 + } 399 + 400 + mlxsw_sp_txhdr_construct(skb, &tx_info); 401 + len = skb->len; 402 + /* Due to a race we might fail here because of a full queue. In that 403 + * unlikely case we simply drop the packet. 
404 + */ 405 + err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); 406 + 407 + if (!err) { 408 + pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 409 + u64_stats_update_begin(&pcpu_stats->syncp); 410 + pcpu_stats->tx_packets++; 411 + pcpu_stats->tx_bytes += len; 412 + u64_stats_update_end(&pcpu_stats->syncp); 413 + } else { 414 + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 415 + dev_kfree_skb_any(skb); 416 + } 417 + return NETDEV_TX_OK; 418 + } 419 + 420 + static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 421 + { 422 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 423 + struct sockaddr *addr = p; 424 + int err; 425 + 426 + if (!is_valid_ether_addr(addr->sa_data)) 427 + return -EADDRNOTAVAIL; 428 + 429 + err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 430 + if (err) 431 + return err; 432 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 433 + return 0; 434 + } 435 + 436 + static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 437 + { 438 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 439 + int err; 440 + 441 + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 442 + if (err) 443 + return err; 444 + dev->mtu = mtu; 445 + return 0; 446 + } 447 + 448 + static struct rtnl_link_stats64 * 449 + mlxsw_sp_port_get_stats64(struct net_device *dev, 450 + struct rtnl_link_stats64 *stats) 451 + { 452 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 453 + struct mlxsw_sp_port_pcpu_stats *p; 454 + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 455 + u32 tx_dropped = 0; 456 + unsigned int start; 457 + int i; 458 + 459 + for_each_possible_cpu(i) { 460 + p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 461 + do { 462 + start = u64_stats_fetch_begin_irq(&p->syncp); 463 + rx_packets = p->rx_packets; 464 + rx_bytes = p->rx_bytes; 465 + tx_packets = p->tx_packets; 466 + tx_bytes = p->tx_bytes; 467 + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 468 + 469 + stats->rx_packets += 
rx_packets; 470 + stats->rx_bytes += rx_bytes; 471 + stats->tx_packets += tx_packets; 472 + stats->tx_bytes += tx_bytes; 473 + /* tx_dropped is u32, updated without syncp protection. */ 474 + tx_dropped += p->tx_dropped; 475 + } 476 + stats->tx_dropped = tx_dropped; 477 + return stats; 478 + } 479 + 480 + int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 481 + u16 vid_end, bool is_member, bool untagged) 482 + { 483 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 484 + char *spvm_pl; 485 + int err; 486 + 487 + spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 488 + if (!spvm_pl) 489 + return -ENOMEM; 490 + 491 + mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 492 + vid_end, is_member, untagged); 493 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 494 + kfree(spvm_pl); 495 + return err; 496 + } 497 + 498 + static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) 499 + { 500 + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 501 + u16 vid, last_visited_vid; 502 + int err; 503 + 504 + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 505 + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid, 506 + vid); 507 + if (err) { 508 + last_visited_vid = vid; 509 + goto err_port_vid_to_fid_set; 510 + } 511 + } 512 + 513 + err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 514 + if (err) { 515 + last_visited_vid = VLAN_N_VID; 516 + goto err_port_vid_to_fid_set; 517 + } 518 + 519 + return 0; 520 + 521 + err_port_vid_to_fid_set: 522 + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) 523 + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid, 524 + vid); 525 + return err; 526 + } 527 + 528 + static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) 529 + { 530 + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 531 + u16 vid; 532 + int err; 533 + 534 + err = 
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 535 + if (err) 536 + return err; 537 + 538 + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 539 + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, 540 + vid, vid); 541 + if (err) 542 + return err; 543 + } 544 + 545 + return 0; 546 + } 547 + 548 + int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, 549 + u16 vid) 550 + { 551 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 552 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 553 + char *sftr_pl; 554 + int err; 555 + 556 + /* VLAN 0 is added to HW filter when device goes up, but it is 557 + * reserved in our case, so simply return. 558 + */ 559 + if (!vid) 560 + return 0; 561 + 562 + if (test_bit(vid, mlxsw_sp_port->active_vfids)) { 563 + netdev_warn(dev, "VID=%d already configured\n", vid); 564 + return 0; 565 + } 566 + 567 + if (!test_bit(vid, mlxsw_sp->active_vfids)) { 568 + err = mlxsw_sp_vfid_create(mlxsw_sp, vid); 569 + if (err) { 570 + netdev_err(dev, "Failed to create vFID=%d\n", 571 + MLXSW_SP_VFID_BASE + vid); 572 + return err; 573 + } 574 + 575 + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 576 + if (!sftr_pl) { 577 + err = -ENOMEM; 578 + goto err_flood_table_alloc; 579 + } 580 + mlxsw_reg_sftr_pack(sftr_pl, 0, vid, 581 + MLXSW_REG_SFGC_TABLE_TYPE_FID, 0, 582 + MLXSW_PORT_CPU_PORT, true); 583 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 584 + kfree(sftr_pl); 585 + if (err) { 586 + netdev_err(dev, "Failed to configure flood table\n"); 587 + goto err_flood_table_config; 588 + } 589 + } 590 + 591 + /* In case we fail in the following steps, we intentionally do not 592 + * destroy the associated vFID. 593 + */ 594 + 595 + /* When adding the first VLAN interface on a bridged port we need to 596 + * transition all the active 802.1Q bridge VLANs to use explicit 597 + * {Port, VID} to FID mappings and set the port's mode to Virtual mode. 
598 + */ 599 + if (!mlxsw_sp_port->nr_vfids) { 600 + err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); 601 + if (err) { 602 + netdev_err(dev, "Failed to set to Virtual mode\n"); 603 + return err; 604 + } 605 + } 606 + 607 + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, 608 + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 609 + true, MLXSW_SP_VFID_BASE + vid, vid); 610 + if (err) { 611 + netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", 612 + vid, MLXSW_SP_VFID_BASE + vid); 613 + goto err_port_vid_to_fid_set; 614 + } 615 + 616 + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); 617 + if (err) { 618 + netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); 619 + goto err_port_vid_learning_set; 620 + } 621 + 622 + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false); 623 + if (err) { 624 + netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 625 + vid); 626 + goto err_port_add_vid; 627 + } 628 + 629 + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, 630 + MLXSW_REG_SPMS_STATE_FORWARDING); 631 + if (err) { 632 + netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); 633 + goto err_port_stp_state_set; 634 + } 635 + 636 + mlxsw_sp_port->nr_vfids++; 637 + set_bit(vid, mlxsw_sp_port->active_vfids); 638 + 639 + return 0; 640 + 641 + err_flood_table_config: 642 + err_flood_table_alloc: 643 + mlxsw_sp_vfid_destroy(mlxsw_sp, vid); 644 + return err; 645 + 646 + err_port_stp_state_set: 647 + mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 648 + err_port_add_vid: 649 + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 650 + err_port_vid_learning_set: 651 + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, 652 + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, 653 + MLXSW_SP_VFID_BASE + vid, vid); 654 + err_port_vid_to_fid_set: 655 + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 656 + return err; 657 + } 658 + 659 + int mlxsw_sp_port_kill_vid(struct net_device *dev, 660 + __be16 __always_unused proto, u16 vid) 
661 + { 662 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 663 + int err; 664 + 665 + /* VLAN 0 is removed from HW filter when device goes down, but 666 + * it is reserved in our case, so simply return. 667 + */ 668 + if (!vid) 669 + return 0; 670 + 671 + if (!test_bit(vid, mlxsw_sp_port->active_vfids)) { 672 + netdev_warn(dev, "VID=%d does not exist\n", vid); 673 + return 0; 674 + } 675 + 676 + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, 677 + MLXSW_REG_SPMS_STATE_DISCARDING); 678 + if (err) { 679 + netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); 680 + return err; 681 + } 682 + 683 + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 684 + if (err) { 685 + netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 686 + vid); 687 + return err; 688 + } 689 + 690 + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 691 + if (err) { 692 + netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); 693 + return err; 694 + } 695 + 696 + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, 697 + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 698 + false, MLXSW_SP_VFID_BASE + vid, 699 + vid); 700 + if (err) { 701 + netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", 702 + vid, MLXSW_SP_VFID_BASE + vid); 703 + return err; 704 + } 705 + 706 + /* When removing the last VLAN interface on a bridged port we need to 707 + * transition all active 802.1Q bridge VLANs to use VID to FID 708 + * mappings and set port's mode to VLAN mode. 
709 + */ 710 + if (mlxsw_sp_port->nr_vfids == 1) { 711 + err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 712 + if (err) { 713 + netdev_err(dev, "Failed to set to VLAN mode\n"); 714 + return err; 715 + } 716 + } 717 + 718 + mlxsw_sp_port->nr_vfids--; 719 + clear_bit(vid, mlxsw_sp_port->active_vfids); 720 + 721 + return 0; 722 + } 723 + 724 + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 725 + .ndo_open = mlxsw_sp_port_open, 726 + .ndo_stop = mlxsw_sp_port_stop, 727 + .ndo_start_xmit = mlxsw_sp_port_xmit, 728 + .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 729 + .ndo_change_mtu = mlxsw_sp_port_change_mtu, 730 + .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 731 + .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 732 + .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 733 + .ndo_fdb_add = switchdev_port_fdb_add, 734 + .ndo_fdb_del = switchdev_port_fdb_del, 735 + .ndo_fdb_dump = switchdev_port_fdb_dump, 736 + .ndo_bridge_setlink = switchdev_port_bridge_setlink, 737 + .ndo_bridge_getlink = switchdev_port_bridge_getlink, 738 + .ndo_bridge_dellink = switchdev_port_bridge_dellink, 739 + }; 740 + 741 + static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 742 + struct ethtool_drvinfo *drvinfo) 743 + { 744 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 745 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 746 + 747 + strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); 748 + strlcpy(drvinfo->version, mlxsw_sp_driver_version, 749 + sizeof(drvinfo->version)); 750 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 751 + "%d.%d.%d", 752 + mlxsw_sp->bus_info->fw_rev.major, 753 + mlxsw_sp->bus_info->fw_rev.minor, 754 + mlxsw_sp->bus_info->fw_rev.subminor); 755 + strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 756 + sizeof(drvinfo->bus_info)); 757 + } 758 + 759 + struct mlxsw_sp_port_hw_stats { 760 + char str[ETH_GSTRING_LEN]; 761 + u64 (*getter)(char *payload); 762 + }; 763 + 764 + static 
const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 765 + { 766 + .str = "a_frames_transmitted_ok", 767 + .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 768 + }, 769 + { 770 + .str = "a_frames_received_ok", 771 + .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 772 + }, 773 + { 774 + .str = "a_frame_check_sequence_errors", 775 + .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 776 + }, 777 + { 778 + .str = "a_alignment_errors", 779 + .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 780 + }, 781 + { 782 + .str = "a_octets_transmitted_ok", 783 + .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 784 + }, 785 + { 786 + .str = "a_octets_received_ok", 787 + .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 788 + }, 789 + { 790 + .str = "a_multicast_frames_xmitted_ok", 791 + .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 792 + }, 793 + { 794 + .str = "a_broadcast_frames_xmitted_ok", 795 + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 796 + }, 797 + { 798 + .str = "a_multicast_frames_received_ok", 799 + .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 800 + }, 801 + { 802 + .str = "a_broadcast_frames_received_ok", 803 + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 804 + }, 805 + { 806 + .str = "a_in_range_length_errors", 807 + .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 808 + }, 809 + { 810 + .str = "a_out_of_range_length_field", 811 + .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 812 + }, 813 + { 814 + .str = "a_frame_too_long_errors", 815 + .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 816 + }, 817 + { 818 + .str = "a_symbol_error_during_carrier", 819 + .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 820 + }, 821 + { 822 + .str = "a_mac_control_frames_transmitted", 823 + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 824 + }, 825 + { 826 + .str = "a_mac_control_frames_received", 827 + .getter = 
mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 828 + }, 829 + { 830 + .str = "a_unsupported_opcodes_received", 831 + .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 832 + }, 833 + { 834 + .str = "a_pause_mac_ctrl_frames_received", 835 + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 836 + }, 837 + { 838 + .str = "a_pause_mac_ctrl_frames_xmitted", 839 + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 840 + }, 841 + }; 842 + 843 + #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 844 + 845 + static void mlxsw_sp_port_get_strings(struct net_device *dev, 846 + u32 stringset, u8 *data) 847 + { 848 + u8 *p = data; 849 + int i; 850 + 851 + switch (stringset) { 852 + case ETH_SS_STATS: 853 + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 854 + memcpy(p, mlxsw_sp_port_hw_stats[i].str, 855 + ETH_GSTRING_LEN); 856 + p += ETH_GSTRING_LEN; 857 + } 858 + break; 859 + } 860 + } 861 + 862 + static void mlxsw_sp_port_get_stats(struct net_device *dev, 863 + struct ethtool_stats *stats, u64 *data) 864 + { 865 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 866 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 867 + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 868 + int i; 869 + int err; 870 + 871 + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); 872 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 873 + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) 874 + data[i] = !err ? 
mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; 875 + } 876 + 877 + static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 878 + { 879 + switch (sset) { 880 + case ETH_SS_STATS: 881 + return MLXSW_SP_PORT_HW_STATS_LEN; 882 + default: 883 + return -EOPNOTSUPP; 884 + } 885 + } 886 + 887 + struct mlxsw_sp_port_link_mode { 888 + u32 mask; 889 + u32 supported; 890 + u32 advertised; 891 + u32 speed; 892 + }; 893 + 894 + static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 895 + { 896 + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 897 + .supported = SUPPORTED_100baseT_Full, 898 + .advertised = ADVERTISED_100baseT_Full, 899 + .speed = 100, 900 + }, 901 + { 902 + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, 903 + .speed = 100, 904 + }, 905 + { 906 + .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 907 + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 908 + .supported = SUPPORTED_1000baseKX_Full, 909 + .advertised = ADVERTISED_1000baseKX_Full, 910 + .speed = 1000, 911 + }, 912 + { 913 + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 914 + .supported = SUPPORTED_10000baseT_Full, 915 + .advertised = ADVERTISED_10000baseT_Full, 916 + .speed = 10000, 917 + }, 918 + { 919 + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 920 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 921 + .supported = SUPPORTED_10000baseKX4_Full, 922 + .advertised = ADVERTISED_10000baseKX4_Full, 923 + .speed = 10000, 924 + }, 925 + { 926 + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 927 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 928 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 929 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 930 + .supported = SUPPORTED_10000baseKR_Full, 931 + .advertised = ADVERTISED_10000baseKR_Full, 932 + .speed = 10000, 933 + }, 934 + { 935 + .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 936 + .supported = SUPPORTED_20000baseKR2_Full, 937 + .advertised = ADVERTISED_20000baseKR2_Full, 938 + .speed = 20000, 939 + }, 940 + { 941 + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 942 + 
.supported = SUPPORTED_40000baseCR4_Full, 943 + .advertised = ADVERTISED_40000baseCR4_Full, 944 + .speed = 40000, 945 + }, 946 + { 947 + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 948 + .supported = SUPPORTED_40000baseKR4_Full, 949 + .advertised = ADVERTISED_40000baseKR4_Full, 950 + .speed = 40000, 951 + }, 952 + { 953 + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 954 + .supported = SUPPORTED_40000baseSR4_Full, 955 + .advertised = ADVERTISED_40000baseSR4_Full, 956 + .speed = 40000, 957 + }, 958 + { 959 + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 960 + .supported = SUPPORTED_40000baseLR4_Full, 961 + .advertised = ADVERTISED_40000baseLR4_Full, 962 + .speed = 40000, 963 + }, 964 + { 965 + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | 966 + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | 967 + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 968 + .speed = 25000, 969 + }, 970 + { 971 + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | 972 + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | 973 + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 974 + .speed = 50000, 975 + }, 976 + { 977 + .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 978 + .supported = SUPPORTED_56000baseKR4_Full, 979 + .advertised = ADVERTISED_56000baseKR4_Full, 980 + .speed = 56000, 981 + }, 982 + { 983 + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | 984 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 985 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 986 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 987 + .speed = 100000, 988 + }, 989 + }; 990 + 991 + #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 992 + 993 + static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto) 994 + { 995 + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 996 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 997 + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 998 + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 999 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1000 + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1001 + return SUPPORTED_FIBRE; 1002 + 1003 + if 
(ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1004 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1005 + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1006 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 1007 + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 1008 + return SUPPORTED_Backplane; 1009 + return 0; 1010 + } 1011 + 1012 + static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto) 1013 + { 1014 + u32 modes = 0; 1015 + int i; 1016 + 1017 + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1018 + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1019 + modes |= mlxsw_sp_port_link_mode[i].supported; 1020 + } 1021 + return modes; 1022 + } 1023 + 1024 + static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto) 1025 + { 1026 + u32 modes = 0; 1027 + int i; 1028 + 1029 + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1030 + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1031 + modes |= mlxsw_sp_port_link_mode[i].advertised; 1032 + } 1033 + return modes; 1034 + } 1035 + 1036 + static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 1037 + struct ethtool_cmd *cmd) 1038 + { 1039 + u32 speed = SPEED_UNKNOWN; 1040 + u8 duplex = DUPLEX_UNKNOWN; 1041 + int i; 1042 + 1043 + if (!carrier_ok) 1044 + goto out; 1045 + 1046 + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1047 + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 1048 + speed = mlxsw_sp_port_link_mode[i].speed; 1049 + duplex = DUPLEX_FULL; 1050 + break; 1051 + } 1052 + } 1053 + out: 1054 + ethtool_cmd_speed_set(cmd, speed); 1055 + cmd->duplex = duplex; 1056 + } 1057 + 1058 + static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 1059 + { 1060 + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1061 + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1062 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1063 + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1064 + return PORT_FIBRE; 1065 + 1066 + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1067 + 
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1068 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 1069 + return PORT_DA; 1070 + 1071 + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1072 + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1073 + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1074 + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 1075 + return PORT_NONE; 1076 + 1077 + return PORT_OTHER; 1078 + } 1079 + 1080 + static int mlxsw_sp_port_get_settings(struct net_device *dev, 1081 + struct ethtool_cmd *cmd) 1082 + { 1083 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1084 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1085 + char ptys_pl[MLXSW_REG_PTYS_LEN]; 1086 + u32 eth_proto_cap; 1087 + u32 eth_proto_admin; 1088 + u32 eth_proto_oper; 1089 + int err; 1090 + 1091 + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1092 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1093 + if (err) { 1094 + netdev_err(dev, "Failed to get proto"); 1095 + return err; 1096 + } 1097 + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, 1098 + &eth_proto_admin, &eth_proto_oper); 1099 + 1100 + cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | 1101 + mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | 1102 + SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1103 + cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); 1104 + mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), 1105 + eth_proto_oper, cmd); 1106 + 1107 + eth_proto_oper = eth_proto_oper ? 
eth_proto_oper : eth_proto_cap; 1108 + cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper); 1109 + cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper); 1110 + 1111 + cmd->transceiver = XCVR_INTERNAL; 1112 + return 0; 1113 + } 1114 + 1115 + static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising) 1116 + { 1117 + u32 ptys_proto = 0; 1118 + int i; 1119 + 1120 + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1121 + if (advertising & mlxsw_sp_port_link_mode[i].advertised) 1122 + ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1123 + } 1124 + return ptys_proto; 1125 + } 1126 + 1127 + static u32 mlxsw_sp_to_ptys_speed(u32 speed) 1128 + { 1129 + u32 ptys_proto = 0; 1130 + int i; 1131 + 1132 + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1133 + if (speed == mlxsw_sp_port_link_mode[i].speed) 1134 + ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1135 + } 1136 + return ptys_proto; 1137 + } 1138 + 1139 + static int mlxsw_sp_port_set_settings(struct net_device *dev, 1140 + struct ethtool_cmd *cmd) 1141 + { 1142 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1143 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1144 + char ptys_pl[MLXSW_REG_PTYS_LEN]; 1145 + u32 speed; 1146 + u32 eth_proto_new; 1147 + u32 eth_proto_cap; 1148 + u32 eth_proto_admin; 1149 + bool is_up; 1150 + int err; 1151 + 1152 + speed = ethtool_cmd_speed(cmd); 1153 + 1154 + eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? 
1155 + mlxsw_sp_to_ptys_advert_link(cmd->advertising) : 1156 + mlxsw_sp_to_ptys_speed(speed); 1157 + 1158 + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1159 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1160 + if (err) { 1161 + netdev_err(dev, "Failed to get proto"); 1162 + return err; 1163 + } 1164 + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL); 1165 + 1166 + eth_proto_new = eth_proto_new & eth_proto_cap; 1167 + if (!eth_proto_new) { 1168 + netdev_err(dev, "Not supported proto admin requested"); 1169 + return -EINVAL; 1170 + } 1171 + if (eth_proto_new == eth_proto_admin) 1172 + return 0; 1173 + 1174 + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new); 1175 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1176 + if (err) { 1177 + netdev_err(dev, "Failed to set proto admin"); 1178 + return err; 1179 + } 1180 + 1181 + err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up); 1182 + if (err) { 1183 + netdev_err(dev, "Failed to get oper status"); 1184 + return err; 1185 + } 1186 + if (!is_up) 1187 + return 0; 1188 + 1189 + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1190 + if (err) { 1191 + netdev_err(dev, "Failed to set admin status"); 1192 + return err; 1193 + } 1194 + 1195 + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1196 + if (err) { 1197 + netdev_err(dev, "Failed to set admin status"); 1198 + return err; 1199 + } 1200 + 1201 + return 0; 1202 + } 1203 + 1204 + static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 1205 + .get_drvinfo = mlxsw_sp_port_get_drvinfo, 1206 + .get_link = ethtool_op_get_link, 1207 + .get_strings = mlxsw_sp_port_get_strings, 1208 + .get_ethtool_stats = mlxsw_sp_port_get_stats, 1209 + .get_sset_count = mlxsw_sp_port_get_sset_count, 1210 + .get_settings = mlxsw_sp_port_get_settings, 1211 + .set_settings = mlxsw_sp_port_set_settings, 1212 + }; 1213 + 1214 + static int mlxsw_sp_port_create(struct 
mlxsw_sp *mlxsw_sp, u8 local_port) 1215 + { 1216 + struct mlxsw_sp_port *mlxsw_sp_port; 1217 + struct net_device *dev; 1218 + bool usable; 1219 + int err; 1220 + 1221 + dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 1222 + if (!dev) 1223 + return -ENOMEM; 1224 + mlxsw_sp_port = netdev_priv(dev); 1225 + mlxsw_sp_port->dev = dev; 1226 + mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1227 + mlxsw_sp_port->local_port = local_port; 1228 + mlxsw_sp_port->learning = 1; 1229 + mlxsw_sp_port->learning_sync = 1; 1230 + mlxsw_sp_port->pvid = 1; 1231 + 1232 + mlxsw_sp_port->pcpu_stats = 1233 + netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 1234 + if (!mlxsw_sp_port->pcpu_stats) { 1235 + err = -ENOMEM; 1236 + goto err_alloc_stats; 1237 + } 1238 + 1239 + dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 1240 + dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 1241 + 1242 + err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 1243 + if (err) { 1244 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 1245 + mlxsw_sp_port->local_port); 1246 + goto err_dev_addr_init; 1247 + } 1248 + 1249 + netif_carrier_off(dev); 1250 + 1251 + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 1252 + NETIF_F_HW_VLAN_CTAG_FILTER; 1253 + 1254 + /* Each packet needs to have a Tx header (metadata) on top all other 1255 + * headers. 
1256 + */ 1257 + dev->hard_header_len += MLXSW_TXHDR_LEN; 1258 + 1259 + err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable); 1260 + if (err) { 1261 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n", 1262 + mlxsw_sp_port->local_port); 1263 + goto err_port_module_check; 1264 + } 1265 + 1266 + if (!usable) { 1267 + dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n", 1268 + mlxsw_sp_port->local_port); 1269 + goto port_not_usable; 1270 + } 1271 + 1272 + err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1273 + if (err) { 1274 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1275 + mlxsw_sp_port->local_port); 1276 + goto err_port_system_port_mapping_set; 1277 + } 1278 + 1279 + err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 1280 + if (err) { 1281 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 1282 + mlxsw_sp_port->local_port); 1283 + goto err_port_swid_set; 1284 + } 1285 + 1286 + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1287 + if (err) { 1288 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1289 + mlxsw_sp_port->local_port); 1290 + goto err_port_mtu_set; 1291 + } 1292 + 1293 + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1294 + if (err) 1295 + goto err_port_admin_status_set; 1296 + 1297 + err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1298 + if (err) { 1299 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1300 + mlxsw_sp_port->local_port); 1301 + goto err_port_buffers_init; 1302 + } 1303 + 1304 + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 1305 + err = register_netdev(dev); 1306 + if (err) { 1307 + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1308 + mlxsw_sp_port->local_port); 1309 + goto err_register_netdev; 1310 + } 1311 + 1312 + err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); 1313 + if (err) 1314 + goto err_port_vlan_init; 1315 + 1316 + 
mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1317 + return 0; 1318 + 1319 + err_port_vlan_init: 1320 + unregister_netdev(dev); 1321 + err_register_netdev: 1322 + err_port_buffers_init: 1323 + err_port_admin_status_set: 1324 + err_port_mtu_set: 1325 + err_port_swid_set: 1326 + err_port_system_port_mapping_set: 1327 + port_not_usable: 1328 + err_port_module_check: 1329 + err_dev_addr_init: 1330 + free_percpu(mlxsw_sp_port->pcpu_stats); 1331 + err_alloc_stats: 1332 + free_netdev(dev); 1333 + return err; 1334 + } 1335 + 1336 + static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp) 1337 + { 1338 + u16 vfid; 1339 + 1340 + for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID) 1341 + mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); 1342 + } 1343 + 1344 + static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1345 + { 1346 + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1347 + 1348 + if (!mlxsw_sp_port) 1349 + return; 1350 + mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); 1351 + unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1352 + mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 1353 + free_percpu(mlxsw_sp_port->pcpu_stats); 1354 + free_netdev(mlxsw_sp_port->dev); 1355 + } 1356 + 1357 + static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 1358 + { 1359 + int i; 1360 + 1361 + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 1362 + mlxsw_sp_port_remove(mlxsw_sp, i); 1363 + kfree(mlxsw_sp->ports); 1364 + } 1365 + 1366 + static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 1367 + { 1368 + size_t alloc_size; 1369 + int i; 1370 + int err; 1371 + 1372 + alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; 1373 + mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 1374 + if (!mlxsw_sp->ports) 1375 + return -ENOMEM; 1376 + 1377 + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 1378 + err = mlxsw_sp_port_create(mlxsw_sp, i); 1379 + if (err) 1380 + goto err_port_create; 1381 + } 1382 + return 0; 1383 + 1384 + 
err_port_create: 1385 + for (i--; i >= 1; i--) 1386 + mlxsw_sp_port_remove(mlxsw_sp, i); 1387 + kfree(mlxsw_sp->ports); 1388 + return err; 1389 + } 1390 + 1391 + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 1392 + char *pude_pl, void *priv) 1393 + { 1394 + struct mlxsw_sp *mlxsw_sp = priv; 1395 + struct mlxsw_sp_port *mlxsw_sp_port; 1396 + enum mlxsw_reg_pude_oper_status status; 1397 + u8 local_port; 1398 + 1399 + local_port = mlxsw_reg_pude_local_port_get(pude_pl); 1400 + mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1401 + if (!mlxsw_sp_port) { 1402 + dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", 1403 + local_port); 1404 + return; 1405 + } 1406 + 1407 + status = mlxsw_reg_pude_oper_status_get(pude_pl); 1408 + if (status == MLXSW_PORT_OPER_STATUS_UP) { 1409 + netdev_info(mlxsw_sp_port->dev, "link up\n"); 1410 + netif_carrier_on(mlxsw_sp_port->dev); 1411 + } else { 1412 + netdev_info(mlxsw_sp_port->dev, "link down\n"); 1413 + netif_carrier_off(mlxsw_sp_port->dev); 1414 + } 1415 + } 1416 + 1417 + static struct mlxsw_event_listener mlxsw_sp_pude_event = { 1418 + .func = mlxsw_sp_pude_event_func, 1419 + .trap_id = MLXSW_TRAP_ID_PUDE, 1420 + }; 1421 + 1422 + static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, 1423 + enum mlxsw_event_trap_id trap_id) 1424 + { 1425 + struct mlxsw_event_listener *el; 1426 + char hpkt_pl[MLXSW_REG_HPKT_LEN]; 1427 + int err; 1428 + 1429 + switch (trap_id) { 1430 + case MLXSW_TRAP_ID_PUDE: 1431 + el = &mlxsw_sp_pude_event; 1432 + break; 1433 + } 1434 + err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); 1435 + if (err) 1436 + return err; 1437 + 1438 + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); 1439 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 1440 + if (err) 1441 + goto err_event_trap_set; 1442 + 1443 + return 0; 1444 + 1445 + err_event_trap_set: 1446 + 
	/* Tail of mlxsw_sp_event_register() error path — the start of the
	 * function lies before this chunk.
	 */
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

/* Undo mlxsw_sp_event_register() for the given event trap.
 * No default case in the switch: the driver only ever registers PUDE
 * (port up/down event) — see mlxsw_sp_init().
 */
static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

/* RX handler shared by all trap IDs below: attribute the packet to the
 * ingress port's netdev, bump per-CPU RX counters and hand the skb to
 * the network stack.
 * NOTE(review): on the non-existent-port path the skb is returned to the
 * caller without being consumed here — confirm the caller frees it.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* Packet types trapped to the CPU; all share the same RX handler and are
 * matched on any local port.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

/* Set up the RX and CTRL trap groups, then register every listener in
 * mlxsw_sp_rx_listener[] and program the HW (HPKT) to trap its packet
 * type to the CPU. On failure, unwinds already-registered listeners and
 * restores the HPKT action to FORWARD.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap action failed; unregister
	 * it before unwinding the fully-initialized entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

/* Reverse of mlxsw_sp_traps_init(): restore FORWARD action for every trap
 * and unregister all listeners.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

/* Bind one flood (SFGC) entry: vFID bridges use the per-FID table
 * (table 0), 802.1Q bridges use the FID-offset tables, split into a
 * unicast table and a BC/MC table.
 */
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		flood_table = 0;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
		if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
			flood_table = MLXSW_SP_FLOOD_TABLE_UC;
		else
			flood_table = MLXSW_SP_FLOOD_TABLE_BM;
	}

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

/* Program flood bindings for every SFGC packet type, for both vFID
 * (non-bridged) and 802.1Q (bridged) bridge types.
 */
static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	/* For non-offloaded netdevs, flood all traffic types to CPU
	 * port.
	 */
	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;
	}

	/* For bridged ports, use one flooding table for unknown unicast
	 * traffic and a second table for unregistered multicast and
	 * broadcast.
	 */
	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

/* Driver-level init callback invoked by mlxsw core once the device is up:
 * base MAC, port netdevs, PUDE events, traps, flood tables, shared
 * buffers and switchdev support, in that order; errors unwind in reverse.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

	/* Buffer and flood-table setup have no dedicated teardown, so the
	 * first three labels intentionally fall through to traps_fini.
	 */
err_switchdev_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
err_ports_create:
	/* NOTE(review): vFIDs appear to be initialized before this chunk;
	 * confirm vfids_init precedes ports_create in the full file.
	 */
	mlxsw_sp_vfids_fini(mlxsw_sp);
	return err;
}

/* Driver-level teardown: reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_vfids_fini(mlxsw_sp);
}

/* Device resource profile pushed to FW/HW by the core at init time.
 * The flood-table sizing here must stay in sync with the matching
 * CONFIG_PROFILE fields declared in cmd.h.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 1,
	.fid_flood_table_size		= VLAN_N_VID,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};

/* True if dev is one of this driver's port netdevs (identified by ops). */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to remove VID 1\n");

	return err;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to add VID 1\n");

	return err;
}

/* A port may only join br_dev if the ASIC's single master bridge slot is
 * free or already holds that same bridge.
 */
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

/* Netdev notifier: veto (PRECHANGEUPPER) enslavement of a port to a
 * second bridge, and sync bridge join/leave state on CHANGEUPPER.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	if (!mlxsw_sp_port_dev_check(dev))
		return NOTIFY_DONE;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (info->master && info->linking &&
		    netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->master &&
		    netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = true;
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = false;
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	/* NOTE(review): return value of register_netdevice_notifier() is
	 * ignored here — confirm this is intentional.
	 */
	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
+121
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "core.h"

/* vFIDs live above the regular 802.1Q FID space (0..VLAN_N_VID-1). */
#define MLXSW_SP_VFID_BASE VLAN_N_VID

struct mlxsw_sp_port;

/* Per-ASIC driver state. */
struct mlxsw_sp {
	unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
	unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlxsw_sp_port **ports;	/* indexed by local port number */
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];	/* used as switch parent ID */
	struct {
		struct delayed_work dw;	/* periodic FDB notification poll */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Single bridge the ASIC's ports may be enslaved to; ref_count
	 * tracks how many ports are members.
	 */
	struct {
		struct net_device *dev;
		unsigned int ref_count;
	} master_bridge;
};

/* Per-CPU RX/TX counters; syncp protects 64-bit reads on 32-bit hosts. */
struct mlxsw_sp_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

/* Per-port driver state, private data of the port netdev. */
struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;		/* BR_STATE_* */
	u8 learning:1,		/* BR_LEARNING offloaded? */
	   learning_sync:1;	/* BR_LEARNING_SYNC offloaded? */
	u16 pvid;
	bool bridged;		/* enslaved to master_bridge.dev */
	/* 802.1Q bridge VLANs */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* VLAN interfaces */
	unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
	u16 nr_vfids;
};

/* FID-offset flood table indices for bridged ports: unknown unicast vs.
 * broadcast/unregistered multicast.
 */
enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BM,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid);
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid);

#endif
+422
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 3 + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/types.h> 37 + 38 + #include "spectrum.h" 39 + #include "core.h" 40 + #include "port.h" 41 + #include "reg.h" 42 + 43 + struct mlxsw_sp_pb { 44 + u8 index; 45 + u16 size; 46 + }; 47 + 48 + #define MLXSW_SP_PB(_index, _size) \ 49 + { \ 50 + .index = _index, \ 51 + .size = _size, \ 52 + } 53 + 54 + static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { 55 + MLXSW_SP_PB(0, 208), 56 + MLXSW_SP_PB(1, 208), 57 + MLXSW_SP_PB(2, 208), 58 + MLXSW_SP_PB(3, 208), 59 + MLXSW_SP_PB(4, 208), 60 + MLXSW_SP_PB(5, 208), 61 + MLXSW_SP_PB(6, 208), 62 + MLXSW_SP_PB(7, 208), 63 + MLXSW_SP_PB(9, 208), 64 + }; 65 + 66 + #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) 67 + 68 + static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) 69 + { 70 + char pbmc_pl[MLXSW_REG_PBMC_LEN]; 71 + int i; 72 + 73 + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 74 + 0xffff, 0xffff / 2); 75 + for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { 76 + const struct mlxsw_sp_pb *pb; 77 + 78 + pb = &mlxsw_sp_pbs[i]; 79 + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); 80 + } 81 + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, 82 + MLXSW_REG(pbmc), pbmc_pl); 83 + } 84 + 85 + #define MLXSW_SP_SB_BYTES_PER_CELL 96 86 + 87 + struct mlxsw_sp_sb_pool { 88 + u8 pool; 89 + enum mlxsw_reg_sbpr_dir dir; 90 + enum mlxsw_reg_sbpr_mode mode; 91 + u32 size; 92 + }; 93 + 94 + #define MLXSW_SP_SB_POOL_INGRESS_SIZE \ 95 + ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \ 96 + MLXSW_SP_SB_BYTES_PER_CELL) 97 + #define MLXSW_SP_SB_POOL_EGRESS_SIZE \ 98 + ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \ 99 + MLXSW_SP_SB_BYTES_PER_CELL) 100 + 101 + #define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ 102 + { \ 103 + .pool = _pool, \ 104 + .dir = _dir, \ 105 + .mode = _mode, \ 106 + .size = _size, \ 107 + } 108 + 109 + #define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ 110 + MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \ 111 + 
MLXSW_REG_SBPR_MODE_DYNAMIC, _size) 112 + 113 + #define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ 114 + MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \ 115 + MLXSW_REG_SBPR_MODE_DYNAMIC, _size) 116 + 117 + static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { 118 + MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE), 119 + MLXSW_SP_SB_POOL_INGRESS(1, 0), 120 + MLXSW_SP_SB_POOL_INGRESS(2, 0), 121 + MLXSW_SP_SB_POOL_INGRESS(3, 0), 122 + MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE), 123 + MLXSW_SP_SB_POOL_EGRESS(1, 0), 124 + MLXSW_SP_SB_POOL_EGRESS(2, 0), 125 + MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE), 126 + }; 127 + 128 + #define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) 129 + 130 + static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) 131 + { 132 + char sbpr_pl[MLXSW_REG_SBPR_LEN]; 133 + int i; 134 + int err; 135 + 136 + for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) { 137 + const struct mlxsw_sp_sb_pool *pool; 138 + 139 + pool = &mlxsw_sp_sb_pools[i]; 140 + mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir, 141 + pool->mode, pool->size); 142 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); 143 + if (err) 144 + return err; 145 + } 146 + return 0; 147 + } 148 + 149 + struct mlxsw_sp_sb_cm { 150 + union { 151 + u8 pg; 152 + u8 tc; 153 + } u; 154 + enum mlxsw_reg_sbcm_dir dir; 155 + u32 min_buff; 156 + u32 max_buff; 157 + u8 pool; 158 + }; 159 + 160 + #define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \ 161 + { \ 162 + .u.pg = _pg_tc, \ 163 + .dir = _dir, \ 164 + .min_buff = _min_buff, \ 165 + .max_buff = _max_buff, \ 166 + .pool = _pool, \ 167 + } 168 + 169 + #define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ 170 + MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \ 171 + _min_buff, _max_buff, 0) 172 + 173 + #define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ 174 + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \ 175 + _min_buff, _max_buff, 0) 176 + 177 + #define 
MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ 178 + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) 179 + 180 + static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { 181 + MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8), 182 + MLXSW_SP_SB_CM_INGRESS(1, 0, 0), 183 + MLXSW_SP_SB_CM_INGRESS(2, 0, 0), 184 + MLXSW_SP_SB_CM_INGRESS(3, 0, 0), 185 + MLXSW_SP_SB_CM_INGRESS(4, 0, 0), 186 + MLXSW_SP_SB_CM_INGRESS(5, 0, 0), 187 + MLXSW_SP_SB_CM_INGRESS(6, 0, 0), 188 + MLXSW_SP_SB_CM_INGRESS(7, 0, 0), 189 + MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff), 190 + MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 191 + MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 192 + MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 193 + MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 194 + MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 195 + MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 196 + MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 197 + MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 198 + MLXSW_SP_SB_CM_EGRESS(8, 0, 0), 199 + MLXSW_SP_SB_CM_EGRESS(9, 0, 0), 200 + MLXSW_SP_SB_CM_EGRESS(10, 0, 0), 201 + MLXSW_SP_SB_CM_EGRESS(11, 0, 0), 202 + MLXSW_SP_SB_CM_EGRESS(12, 0, 0), 203 + MLXSW_SP_SB_CM_EGRESS(13, 0, 0), 204 + MLXSW_SP_SB_CM_EGRESS(14, 0, 0), 205 + MLXSW_SP_SB_CM_EGRESS(15, 0, 0), 206 + MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff), 207 + }; 208 + 209 + #define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms) 210 + 211 + static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { 212 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0), 213 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1), 214 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2), 215 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3), 216 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4), 217 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5), 218 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6), 219 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7), 220 + 
MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8), 221 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9), 222 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10), 223 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11), 224 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12), 225 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13), 226 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14), 227 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15), 228 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16), 229 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17), 230 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18), 231 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19), 232 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20), 233 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21), 234 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22), 235 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23), 236 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24), 237 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25), 238 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26), 239 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27), 240 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28), 241 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29), 242 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30), 243 + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31), 244 + }; 245 + 246 + #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ 247 + ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) 248 + 249 + static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, 250 + const struct mlxsw_sp_sb_cm *cms, 251 + size_t cms_len) 252 + { 253 + char sbcm_pl[MLXSW_REG_SBCM_LEN]; 254 + int i; 255 + int err; 256 + 257 + for (i = 0; i < cms_len; i++) { 258 + const struct mlxsw_sp_sb_cm *cm; 259 + 260 + cm = &cms[i]; 261 + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir, 262 + cm->min_buff, cm->max_buff, cm->pool); 263 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); 264 + if (err) 265 + return err; 266 + } 267 + return 0; 268 + } 269 + 270 + static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) 271 + { 272 + return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, 273 + mlxsw_sp_port->local_port, mlxsw_sp_sb_cms, 274 + MLXSW_SP_SB_CMS_LEN); 275 + } 276 + 277 + static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp 
*mlxsw_sp) 278 + { 279 + return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms, 280 + MLXSW_SP_CPU_PORT_SB_MCS_LEN); 281 + } 282 + 283 + struct mlxsw_sp_sb_pm { 284 + u8 pool; 285 + enum mlxsw_reg_sbpm_dir dir; 286 + u32 min_buff; 287 + u32 max_buff; 288 + }; 289 + 290 + #define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \ 291 + { \ 292 + .pool = _pool, \ 293 + .dir = _dir, \ 294 + .min_buff = _min_buff, \ 295 + .max_buff = _max_buff, \ 296 + } 297 + 298 + #define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ 299 + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \ 300 + _min_buff, _max_buff) 301 + 302 + #define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ 303 + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \ 304 + _min_buff, _max_buff) 305 + 306 + static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { 307 + MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff), 308 + MLXSW_SP_SB_PM_INGRESS(1, 0, 0), 309 + MLXSW_SP_SB_PM_INGRESS(2, 0, 0), 310 + MLXSW_SP_SB_PM_INGRESS(3, 0, 0), 311 + MLXSW_SP_SB_PM_EGRESS(0, 0, 7), 312 + MLXSW_SP_SB_PM_EGRESS(1, 0, 0), 313 + MLXSW_SP_SB_PM_EGRESS(2, 0, 0), 314 + MLXSW_SP_SB_PM_EGRESS(3, 0, 0), 315 + }; 316 + 317 + #define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) 318 + 319 + static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) 320 + { 321 + char sbpm_pl[MLXSW_REG_SBPM_LEN]; 322 + int i; 323 + int err; 324 + 325 + for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { 326 + const struct mlxsw_sp_sb_pm *pm; 327 + 328 + pm = &mlxsw_sp_sb_pms[i]; 329 + mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port, 330 + pm->pool, pm->dir, 331 + pm->min_buff, pm->max_buff); 332 + err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, 333 + MLXSW_REG(sbpm), sbpm_pl); 334 + if (err) 335 + return err; 336 + } 337 + return 0; 338 + } 339 + 340 + struct mlxsw_sp_sb_mm { 341 + u8 prio; 342 + u32 min_buff; 343 + u32 max_buff; 344 + u8 pool; 345 + }; 346 + 347 + #define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, 
_pool) \ 348 + { \ 349 + .prio = _prio, \ 350 + .min_buff = _min_buff, \ 351 + .max_buff = _max_buff, \ 352 + .pool = _pool, \ 353 + } 354 + 355 + static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { 356 + MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 357 + MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 358 + MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 359 + MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 360 + MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 361 + MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 362 + MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 363 + MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 364 + MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 365 + MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 366 + MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 367 + MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 368 + MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 369 + MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 370 + MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 371 + }; 372 + 373 + #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) 374 + 375 + static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) 376 + { 377 + char sbmm_pl[MLXSW_REG_SBMM_LEN]; 378 + int i; 379 + int err; 380 + 381 + for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { 382 + const struct mlxsw_sp_sb_mm *mc; 383 + 384 + mc = &mlxsw_sp_sb_mms[i]; 385 + mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff, 386 + mc->max_buff, mc->pool); 387 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); 388 + if (err) 389 + return err; 390 + } 391 + return 0; 392 + } 393 + 394 + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) 395 + { 396 + int err; 397 + 398 + err = mlxsw_sp_sb_pools_init(mlxsw_sp); 399 + if (err) 
400 + return err; 401 + err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); 402 + if (err) 403 + return err; 404 + err = mlxsw_sp_sb_mms_init(mlxsw_sp); 405 + 406 + return err; 407 + } 408 + 409 + int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) 410 + { 411 + int err; 412 + 413 + err = mlxsw_sp_port_pb_init(mlxsw_sp_port); 414 + if (err) 415 + return err; 416 + err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port); 417 + if (err) 418 + return err; 419 + err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port); 420 + 421 + return err; 422 + }
+863
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

/* switchdev attr_get: report the switch parent ID (the ASIC base MAC, so
 * all ports of one ASIC share it) and the offloaded bridge port flags.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Map the bridge STP state to a HW spanning-tree state and program it
 * (via SPMS) for every VLAN active on the port.
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* switchdev attr_set handler for STP state: commit phase only. */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

/* switchdev attr_set handler for bridge port flags — the function
 * continues past the end of this chunk.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ?
1 : 0; 135 + return 0; 136 + } 137 + 138 + static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) 139 + { 140 + char sfdat_pl[MLXSW_REG_SFDAT_LEN]; 141 + int err; 142 + 143 + mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time); 144 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl); 145 + if (err) 146 + return err; 147 + mlxsw_sp->ageing_time = ageing_time; 148 + return 0; 149 + } 150 + 151 + static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, 152 + struct switchdev_trans *trans, 153 + unsigned long ageing_jiffies) 154 + { 155 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 156 + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; 157 + 158 + if (switchdev_trans_ph_prepare(trans)) 159 + return 0; 160 + 161 + return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); 162 + } 163 + 164 + static int mlxsw_sp_port_attr_set(struct net_device *dev, 165 + const struct switchdev_attr *attr, 166 + struct switchdev_trans *trans) 167 + { 168 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 169 + int err = 0; 170 + 171 + switch (attr->id) { 172 + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 173 + err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, 174 + attr->u.stp_state); 175 + break; 176 + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 177 + err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, 178 + attr->u.brport_flags); 179 + break; 180 + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: 181 + err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, 182 + attr->u.ageing_time); 183 + break; 184 + default: 185 + err = -EOPNOTSUPP; 186 + break; 187 + } 188 + 189 + return err; 190 + } 191 + 192 + static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 193 + { 194 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 195 + char spvid_pl[MLXSW_REG_SPVID_LEN]; 196 + 197 + mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 198 + return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(spvid), spvid_pl); 199 + } 200 + 201 + static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 202 + { 203 + char sfmr_pl[MLXSW_REG_SFMR_LEN]; 204 + int err; 205 + 206 + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid); 207 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 208 + 209 + if (err) 210 + return err; 211 + 212 + set_bit(fid, mlxsw_sp->active_fids); 213 + return 0; 214 + } 215 + 216 + static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid) 217 + { 218 + char sfmr_pl[MLXSW_REG_SFMR_LEN]; 219 + 220 + clear_bit(fid, mlxsw_sp->active_fids); 221 + 222 + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, 223 + fid, fid); 224 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 225 + } 226 + 227 + static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) 228 + { 229 + enum mlxsw_reg_svfa_mt mt; 230 + 231 + if (mlxsw_sp_port->nr_vfids) 232 + mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 233 + else 234 + mt = MLXSW_REG_SVFA_MT_VID_TO_FID; 235 + 236 + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid); 237 + } 238 + 239 + static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) 240 + { 241 + enum mlxsw_reg_svfa_mt mt; 242 + 243 + if (!mlxsw_sp_port->nr_vfids) 244 + return 0; 245 + 246 + mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 247 + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid); 248 + } 249 + 250 + static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, 251 + u16 fid, bool set, bool only_uc) 252 + { 253 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 254 + char *sftr_pl; 255 + int err; 256 + 257 + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 258 + if (!sftr_pl) 259 + return -ENOMEM; 260 + 261 + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid, 262 + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0, 263 + mlxsw_sp_port->local_port, set); 264 + err = 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 265 + if (err) 266 + goto buffer_out; 267 + 268 + /* Flooding control allows one to decide whether a given port will 269 + * flood unicast traffic for which there is no FDB entry. 270 + */ 271 + if (only_uc) 272 + goto buffer_out; 273 + 274 + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid, 275 + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0, 276 + mlxsw_sp_port->local_port, set); 277 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 278 + 279 + buffer_out: 280 + kfree(sftr_pl); 281 + return err; 282 + } 283 + 284 + static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin, 285 + u16 vid_end) 286 + { 287 + u16 vid; 288 + int err; 289 + 290 + for (vid = vid_begin; vid <= vid_end; vid++) { 291 + err = mlxsw_sp_port_add_vid(dev, 0, vid); 292 + if (err) 293 + goto err_port_add_vid; 294 + } 295 + return 0; 296 + 297 + err_port_add_vid: 298 + for (vid--; vid >= vid_begin; vid--) 299 + mlxsw_sp_port_kill_vid(dev, 0, vid); 300 + return err; 301 + } 302 + 303 + static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, 304 + u16 vid_begin, u16 vid_end, 305 + bool flag_untagged, bool flag_pvid) 306 + { 307 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 308 + struct net_device *dev = mlxsw_sp_port->dev; 309 + enum mlxsw_reg_svfa_mt mt; 310 + u16 vid, vid_e; 311 + int err; 312 + 313 + /* In case this is invoked with BRIDGE_FLAGS_SELF and port is 314 + * not bridged, then packets ingressing through the port with 315 + * the specified VIDs will be directed to CPU. 
316 + */ 317 + if (!mlxsw_sp_port->bridged) 318 + return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end); 319 + 320 + for (vid = vid_begin; vid <= vid_end; vid++) { 321 + if (!test_bit(vid, mlxsw_sp->active_fids)) { 322 + err = mlxsw_sp_fid_create(mlxsw_sp, vid); 323 + if (err) { 324 + netdev_err(dev, "Failed to create FID=%d\n", 325 + vid); 326 + return err; 327 + } 328 + 329 + /* When creating a FID, we set a VID to FID mapping 330 + * regardless of the port's mode. 331 + */ 332 + mt = MLXSW_REG_SVFA_MT_VID_TO_FID; 333 + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, 334 + true, vid, vid); 335 + if (err) { 336 + netdev_err(dev, "Failed to create FID=VID=%d mapping\n", 337 + vid); 338 + return err; 339 + } 340 + } 341 + 342 + /* Set FID mapping according to port's mode */ 343 + err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid); 344 + if (err) { 345 + netdev_err(dev, "Failed to map FID=%d", vid); 346 + return err; 347 + } 348 + 349 + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true, 350 + false); 351 + if (err) { 352 + netdev_err(dev, "Failed to set flooding for FID=%d", 353 + vid); 354 + return err; 355 + } 356 + } 357 + 358 + for (vid = vid_begin; vid <= vid_end; 359 + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 360 + vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 361 + vid_end); 362 + 363 + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true, 364 + flag_untagged); 365 + if (err) { 366 + netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n", 367 + vid, vid_e); 368 + return err; 369 + } 370 + } 371 + 372 + vid = vid_begin; 373 + if (flag_pvid && mlxsw_sp_port->pvid != vid) { 374 + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 375 + if (err) { 376 + netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n", 377 + vid); 378 + return err; 379 + } 380 + mlxsw_sp_port->pvid = vid; 381 + } 382 + 383 + /* Changing activity bits only if HW operation succeded */ 384 + for (vid = vid_begin; vid <= vid_end; vid++) 385 + set_bit(vid, 
mlxsw_sp_port->active_vlans); 386 + 387 + return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, 388 + mlxsw_sp_port->stp_state); 389 + } 390 + 391 + static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, 392 + const struct switchdev_obj_port_vlan *vlan, 393 + struct switchdev_trans *trans) 394 + { 395 + bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 396 + bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID; 397 + 398 + if (switchdev_trans_ph_prepare(trans)) 399 + return 0; 400 + 401 + return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, 402 + vlan->vid_begin, vlan->vid_end, 403 + untagged_flag, pvid_flag); 404 + } 405 + 406 + static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port, 407 + const char *mac, u16 vid, bool adding, 408 + bool dynamic) 409 + { 410 + enum mlxsw_reg_sfd_rec_policy policy; 411 + enum mlxsw_reg_sfd_op op; 412 + char *sfd_pl; 413 + int err; 414 + 415 + if (!vid) 416 + vid = mlxsw_sp_port->pvid; 417 + 418 + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 419 + if (!sfd_pl) 420 + return -ENOMEM; 421 + 422 + policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : 423 + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; 424 + op = adding ? 
MLXSW_REG_SFD_OP_WRITE_EDIT : 425 + MLXSW_REG_SFD_OP_WRITE_REMOVE; 426 + mlxsw_reg_sfd_pack(sfd_pl, op, 0); 427 + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, 428 + mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, 429 + mlxsw_sp_port->local_port); 430 + err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), 431 + sfd_pl); 432 + kfree(sfd_pl); 433 + 434 + return err; 435 + } 436 + 437 + static int 438 + mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, 439 + const struct switchdev_obj_port_fdb *fdb, 440 + struct switchdev_trans *trans) 441 + { 442 + if (switchdev_trans_ph_prepare(trans)) 443 + return 0; 444 + 445 + return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, 446 + true, false); 447 + } 448 + 449 + static int mlxsw_sp_port_obj_add(struct net_device *dev, 450 + const struct switchdev_obj *obj, 451 + struct switchdev_trans *trans) 452 + { 453 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 454 + int err = 0; 455 + 456 + switch (obj->id) { 457 + case SWITCHDEV_OBJ_ID_PORT_VLAN: 458 + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, 459 + SWITCHDEV_OBJ_PORT_VLAN(obj), 460 + trans); 461 + break; 462 + case SWITCHDEV_OBJ_ID_PORT_FDB: 463 + err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, 464 + SWITCHDEV_OBJ_PORT_FDB(obj), 465 + trans); 466 + break; 467 + default: 468 + err = -EOPNOTSUPP; 469 + break; 470 + } 471 + 472 + return err; 473 + } 474 + 475 + static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin, 476 + u16 vid_end) 477 + { 478 + u16 vid; 479 + int err; 480 + 481 + for (vid = vid_begin; vid <= vid_end; vid++) { 482 + err = mlxsw_sp_port_kill_vid(dev, 0, vid); 483 + if (err) 484 + return err; 485 + } 486 + 487 + return 0; 488 + } 489 + 490 + static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 491 + u16 vid_begin, u16 vid_end, bool init) 492 + { 493 + struct net_device *dev = mlxsw_sp_port->dev; 494 + u16 vid, vid_e; 495 + int err; 496 + 497 + /* In case this is invoked with 
BRIDGE_FLAGS_SELF and port is 498 + * not bridged, then prevent packets ingressing through the 499 + * port with the specified VIDs from being trapped to CPU. 500 + */ 501 + if (!init && !mlxsw_sp_port->bridged) 502 + return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end); 503 + 504 + for (vid = vid_begin; vid <= vid_end; 505 + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 506 + vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 507 + vid_end); 508 + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false, 509 + false); 510 + if (err) { 511 + netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n", 512 + vid, vid_e); 513 + return err; 514 + } 515 + } 516 + 517 + if ((mlxsw_sp_port->pvid >= vid_begin) && 518 + (mlxsw_sp_port->pvid <= vid_end)) { 519 + /* Default VLAN is always 1 */ 520 + mlxsw_sp_port->pvid = 1; 521 + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 522 + mlxsw_sp_port->pvid); 523 + if (err) { 524 + netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n", 525 + vid); 526 + return err; 527 + } 528 + } 529 + 530 + if (init) 531 + goto out; 532 + 533 + for (vid = vid_begin; vid <= vid_end; vid++) { 534 + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false, 535 + false); 536 + if (err) { 537 + netdev_err(dev, "Failed to clear flooding for FID=%d", 538 + vid); 539 + return err; 540 + } 541 + 542 + /* Remove FID mapping in case of Virtual mode */ 543 + err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); 544 + if (err) { 545 + netdev_err(dev, "Failed to unmap FID=%d", vid); 546 + return err; 547 + } 548 + } 549 + 550 + out: 551 + /* Changing activity bits only if HW operation succeded */ 552 + for (vid = vid_begin; vid <= vid_end; vid++) 553 + clear_bit(vid, mlxsw_sp_port->active_vlans); 554 + 555 + return 0; 556 + } 557 + 558 + static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 559 + const struct switchdev_obj_port_vlan *vlan) 560 + { 561 + return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 562 + vlan->vid_begin, 
vlan->vid_end, false); 563 + } 564 + 565 + static int 566 + mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, 567 + const struct switchdev_obj_port_fdb *fdb) 568 + { 569 + return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, 570 + false, false); 571 + } 572 + 573 + static int mlxsw_sp_port_obj_del(struct net_device *dev, 574 + const struct switchdev_obj *obj) 575 + { 576 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 577 + int err = 0; 578 + 579 + switch (obj->id) { 580 + case SWITCHDEV_OBJ_ID_PORT_VLAN: 581 + err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, 582 + SWITCHDEV_OBJ_PORT_VLAN(obj)); 583 + break; 584 + case SWITCHDEV_OBJ_ID_PORT_FDB: 585 + err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, 586 + SWITCHDEV_OBJ_PORT_FDB(obj)); 587 + break; 588 + default: 589 + err = -EOPNOTSUPP; 590 + break; 591 + } 592 + 593 + return err; 594 + } 595 + 596 + static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, 597 + struct switchdev_obj_port_fdb *fdb, 598 + switchdev_obj_dump_cb_t *cb) 599 + { 600 + char *sfd_pl; 601 + char mac[ETH_ALEN]; 602 + u16 vid; 603 + u8 local_port; 604 + u8 num_rec; 605 + int stored_err = 0; 606 + int i; 607 + int err; 608 + 609 + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 610 + if (!sfd_pl) 611 + return -ENOMEM; 612 + 613 + mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); 614 + do { 615 + mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); 616 + err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, 617 + MLXSW_REG(sfd), sfd_pl); 618 + if (err) 619 + goto out; 620 + 621 + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 622 + 623 + /* Even in case of error, we have to run the dump to the end 624 + * so the session in firmware is finished. 
625 + */ 626 + if (stored_err) 627 + continue; 628 + 629 + for (i = 0; i < num_rec; i++) { 630 + switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { 631 + case MLXSW_REG_SFD_REC_TYPE_UNICAST: 632 + mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid, 633 + &local_port); 634 + if (local_port == mlxsw_sp_port->local_port) { 635 + ether_addr_copy(fdb->addr, mac); 636 + fdb->ndm_state = NUD_REACHABLE; 637 + fdb->vid = vid; 638 + err = cb(&fdb->obj); 639 + if (err) 640 + stored_err = err; 641 + } 642 + } 643 + } 644 + } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); 645 + 646 + out: 647 + kfree(sfd_pl); 648 + return stored_err ? stored_err : err; 649 + } 650 + 651 + static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, 652 + struct switchdev_obj_port_vlan *vlan, 653 + switchdev_obj_dump_cb_t *cb) 654 + { 655 + u16 vid; 656 + int err = 0; 657 + 658 + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 659 + vlan->flags = 0; 660 + if (vid == mlxsw_sp_port->pvid) 661 + vlan->flags |= BRIDGE_VLAN_INFO_PVID; 662 + vlan->vid_begin = vid; 663 + vlan->vid_end = vid; 664 + err = cb(&vlan->obj); 665 + if (err) 666 + break; 667 + } 668 + return err; 669 + } 670 + 671 + static int mlxsw_sp_port_obj_dump(struct net_device *dev, 672 + struct switchdev_obj *obj, 673 + switchdev_obj_dump_cb_t *cb) 674 + { 675 + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 676 + int err = 0; 677 + 678 + switch (obj->id) { 679 + case SWITCHDEV_OBJ_ID_PORT_VLAN: 680 + err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, 681 + SWITCHDEV_OBJ_PORT_VLAN(obj), cb); 682 + break; 683 + case SWITCHDEV_OBJ_ID_PORT_FDB: 684 + err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, 685 + SWITCHDEV_OBJ_PORT_FDB(obj), cb); 686 + break; 687 + default: 688 + err = -EOPNOTSUPP; 689 + break; 690 + } 691 + 692 + return err; 693 + } 694 + 695 + const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { 696 + .switchdev_port_attr_get = mlxsw_sp_port_attr_get, 697 + .switchdev_port_attr_set = 
mlxsw_sp_port_attr_set, 698 + .switchdev_port_obj_add = mlxsw_sp_port_obj_add, 699 + .switchdev_port_obj_del = mlxsw_sp_port_obj_del, 700 + .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, 701 + }; 702 + 703 + static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, 704 + char *sfn_pl, int rec_index, 705 + bool adding) 706 + { 707 + struct mlxsw_sp_port *mlxsw_sp_port; 708 + char mac[ETH_ALEN]; 709 + u8 local_port; 710 + u16 vid; 711 + int err; 712 + 713 + mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port); 714 + mlxsw_sp_port = mlxsw_sp->ports[local_port]; 715 + if (!mlxsw_sp_port) { 716 + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); 717 + return; 718 + } 719 + 720 + err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid, 721 + adding && mlxsw_sp_port->learning, true); 722 + if (err) { 723 + if (net_ratelimit()) 724 + netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); 725 + return; 726 + } 727 + 728 + if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) { 729 + struct switchdev_notifier_fdb_info info; 730 + unsigned long notifier_type; 731 + 732 + info.addr = mac; 733 + info.vid = vid; 734 + notifier_type = adding ? 
SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; 735 + call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev, 736 + &info.info); 737 + } 738 + } 739 + 740 + static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, 741 + char *sfn_pl, int rec_index) 742 + { 743 + switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) { 744 + case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC: 745 + mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, 746 + rec_index, true); 747 + break; 748 + case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC: 749 + mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, 750 + rec_index, false); 751 + break; 752 + } 753 + } 754 + 755 + static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) 756 + { 757 + schedule_delayed_work(&mlxsw_sp->fdb_notify.dw, 758 + msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); 759 + } 760 + 761 + static void mlxsw_sp_fdb_notify_work(struct work_struct *work) 762 + { 763 + struct mlxsw_sp *mlxsw_sp; 764 + char *sfn_pl; 765 + u8 num_rec; 766 + int i; 767 + int err; 768 + 769 + sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL); 770 + if (!sfn_pl) 771 + return; 772 + 773 + mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); 774 + 775 + do { 776 + mlxsw_reg_sfn_pack(sfn_pl); 777 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); 778 + if (err) { 779 + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); 780 + break; 781 + } 782 + num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); 783 + for (i = 0; i < num_rec; i++) 784 + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); 785 + 786 + } while (num_rec); 787 + 788 + kfree(sfn_pl); 789 + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 790 + } 791 + 792 + static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) 793 + { 794 + int err; 795 + 796 + err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); 797 + if (err) { 798 + dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 799 + return err; 800 + } 
801 + INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 802 + mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 803 + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 804 + return 0; 805 + } 806 + 807 + static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) 808 + { 809 + cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw); 810 + } 811 + 812 + static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) 813 + { 814 + u16 fid; 815 + 816 + for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID) 817 + mlxsw_sp_fid_destroy(mlxsw_sp, fid); 818 + } 819 + 820 + int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 821 + { 822 + return mlxsw_sp_fdb_init(mlxsw_sp); 823 + } 824 + 825 + void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 826 + { 827 + mlxsw_sp_fdb_fini(mlxsw_sp); 828 + mlxsw_sp_fids_fini(mlxsw_sp); 829 + } 830 + 831 + int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) 832 + { 833 + struct net_device *dev = mlxsw_sp_port->dev; 834 + int err; 835 + 836 + /* Allow only untagged packets to ingress and tag them internally 837 + * with VID 1. 838 + */ 839 + mlxsw_sp_port->pvid = 1; 840 + err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true); 841 + if (err) { 842 + netdev_err(dev, "Unable to init VLANs\n"); 843 + return err; 844 + } 845 + 846 + /* Add implicit VLAN interface in the device, so that untagged 847 + * packets will be classified to the default vFID. 848 + */ 849 + err = mlxsw_sp_port_add_vid(dev, 0, 1); 850 + if (err) 851 + netdev_err(dev, "Failed to configure default vFID\n"); 852 + 853 + return err; 854 + } 855 + 856 + void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 857 + { 858 + mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 859 + } 860 + 861 + void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) 862 + { 863 + }
+2 -1
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
··· 1377 1377 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 1378 1378 if (!sftr_pl) 1379 1379 return -ENOMEM; 1380 - mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0); 1380 + mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0, 1381 + MLXSW_PORT_CPU_PORT, true); 1381 1382 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl); 1382 1383 kfree(sftr_pl); 1383 1384 if (err)
+1
drivers/net/ethernet/mellanox/mlxsw/txheader.h
··· 38 38 39 39 #define MLXSW_TXHDR_LEN 0x10 40 40 #define MLXSW_TXHDR_VERSION_0 0 41 + #define MLXSW_TXHDR_VERSION_1 1 41 42 42 43 enum { 43 44 MLXSW_TXHDR_ETH_CTL,
+1
include/linux/netdevice.h
··· 2106 2106 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ 2107 2107 #define NETDEV_CHANGEINFODATA 0x0018 2108 2108 #define NETDEV_BONDING_INFO 0x0019 2109 + #define NETDEV_PRECHANGEUPPER 0x001A 2109 2110 2110 2111 int register_netdevice_notifier(struct notifier_block *nb); 2111 2112 int unregister_netdevice_notifier(struct notifier_block *nb);
+9
net/core/dev.c
··· 5346 5346 changeupper_info.master = master; 5347 5347 changeupper_info.linking = true; 5348 5348 5349 + ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, 5350 + &changeupper_info.info); 5351 + ret = notifier_to_errno(ret); 5352 + if (ret) 5353 + return ret; 5354 + 5349 5355 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private, 5350 5356 master); 5351 5357 if (ret) ··· 5493 5487 changeupper_info.upper_dev = upper_dev; 5494 5488 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 5495 5489 changeupper_info.linking = false; 5490 + 5491 + call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, 5492 + &changeupper_info.info); 5496 5493 5497 5494 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 5498 5495