Lines Matching refs:dev

56 	struct mlx4_ib_dev     *dev ;  member
73 static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
76 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num, in mlx4_ib_update_cache_on_guid_change() argument
84 if (!mlx4_is_master(dev->dev)) in mlx4_ib_update_cache_on_guid_change()
87 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. in mlx4_ib_update_cache_on_guid_change()
98 if (slave_id >= dev->dev->num_slaves) { in mlx4_ib_update_cache_on_guid_change()
104 memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id], in mlx4_ib_update_cache_on_guid_change()
113 static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index) in get_cached_alias_guid() argument
119 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index]; in get_cached_alias_guid()
128 void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave, in mlx4_ib_slave_alias_guid_event() argument
138 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_slave_alias_guid_event()
139 if (dev->sriov.alias_guid.ports_guid[port_index].state_flags & in mlx4_ib_slave_alias_guid_event()
143 curr_guid = *(__be64 *)&dev->sriov. in mlx4_ib_slave_alias_guid_event()
152 required_guid = mlx4_get_admin_guid(dev->dev, slave, port); in mlx4_ib_slave_alias_guid_event()
156 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
159 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
162 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
166 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
168 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
173 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_slave_alias_guid_event()
176 mlx4_ib_init_alias_guid_work(dev, port_index); in mlx4_ib_slave_alias_guid_event()
188 void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, in mlx4_ib_notify_slaves_on_guid_change() argument
203 if (!mlx4_is_master(dev->dev)) in mlx4_ib_notify_slaves_on_guid_change()
206 rec = &dev->sriov.alias_guid.ports_guid[port_num - 1]. in mlx4_ib_notify_slaves_on_guid_change()
208 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. in mlx4_ib_notify_slaves_on_guid_change()
221 if (slave_id >= dev->dev->persist->num_vfs + 1) in mlx4_ib_notify_slaves_on_guid_change()
224 slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num); in mlx4_ib_notify_slaves_on_guid_change()
229 form_cache_ag = get_cached_alias_guid(dev, port_num, in mlx4_ib_notify_slaves_on_guid_change()
239 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_notify_slaves_on_guid_change()
251 spin_unlock_irqrestore(&dev->sriov. in mlx4_ib_notify_slaves_on_guid_change()
256 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, in mlx4_ib_notify_slaves_on_guid_change()
258 mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num); in mlx4_ib_notify_slaves_on_guid_change()
262 prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num); in mlx4_ib_notify_slaves_on_guid_change()
263 new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num, in mlx4_ib_notify_slaves_on_guid_change()
272 mlx4_gen_port_state_change_eqe(dev->dev, slave_id, in mlx4_ib_notify_slaves_on_guid_change()
276 set_and_calc_slave_port_state(dev->dev, slave_id, port_num, in mlx4_ib_notify_slaves_on_guid_change()
282 mlx4_gen_port_state_change_eqe(dev->dev, in mlx4_ib_notify_slaves_on_guid_change()
295 struct mlx4_ib_dev *dev; in aliasguid_query_handler() local
308 dev = cb_ctx->dev; in aliasguid_query_handler()
310 rec = &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
330 rec = &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
333 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in aliasguid_query_handler()
360 mlx4_ib_warn(&dev->ib_dev, in aliasguid_query_handler()
374 mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set" in aliasguid_query_handler()
389 mlx4_set_admin_guid(dev->dev, in aliasguid_query_handler()
427 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in aliasguid_query_handler()
433 mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num, in aliasguid_query_handler()
437 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in aliasguid_query_handler()
438 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in aliasguid_query_handler()
439 if (!dev->sriov.is_going_down) { in aliasguid_query_handler()
440 get_low_record_time_index(dev, port_index, &resched_delay_sec); in aliasguid_query_handler()
441 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, in aliasguid_query_handler()
442 &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
451 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in aliasguid_query_handler()
452 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in aliasguid_query_handler()
455 static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) in invalidate_guid_record() argument
461 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status in invalidate_guid_record()
467 *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
480 dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
482 if (dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
484 dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
493 struct mlx4_ib_dev *dev = to_mdev(ibdev); in set_guid_rec() local
503 &dev->sriov.alias_guid.ports_guid[port - 1].cb_list; in set_guid_rec()
526 callback_context->dev = dev; in set_guid_rec()
542 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
544 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
547 ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client, in set_guid_rec()
557 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
560 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
569 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in set_guid_rec()
570 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
571 invalidate_guid_record(dev, port, index); in set_guid_rec()
572 if (!dev->sriov.is_going_down) { in set_guid_rec()
573 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in set_guid_rec()
574 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, in set_guid_rec()
577 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
578 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in set_guid_rec()
584 static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port) in mlx4_ib_guid_port_init() argument
594 if (!entry || entry > dev->dev->persist->num_vfs || in mlx4_ib_guid_port_init()
595 !mlx4_is_slave_active(dev->dev, entry)) in mlx4_ib_guid_port_init()
597 guid = mlx4_get_admin_guid(dev->dev, entry, port); in mlx4_ib_guid_port_init()
598 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. in mlx4_ib_guid_port_init()
608 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) in mlx4_ib_invalidate_all_guid_record() argument
615 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_invalidate_all_guid_record()
616 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_invalidate_all_guid_record()
618 if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags & in mlx4_ib_invalidate_all_guid_record()
620 mlx4_ib_guid_port_init(dev, port); in mlx4_ib_invalidate_all_guid_record()
621 dev->sriov.alias_guid.ports_guid[port - 1].state_flags &= in mlx4_ib_invalidate_all_guid_record()
625 invalidate_guid_record(dev, port, i); in mlx4_ib_invalidate_all_guid_record()
627 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) { in mlx4_ib_invalidate_all_guid_record()
633 cancel_delayed_work(&dev->sriov.alias_guid. in mlx4_ib_invalidate_all_guid_record()
635 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in mlx4_ib_invalidate_all_guid_record()
636 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, in mlx4_ib_invalidate_all_guid_record()
639 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_invalidate_all_guid_record()
640 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_invalidate_all_guid_record()
643 static void set_required_record(struct mlx4_ib_dev *dev, u8 port, in set_required_record() argument
653 &dev->sriov.alias_guid.ports_guid[port]. in set_required_record()
693 static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, in get_low_record_time_index() argument
702 rec = dev->sriov.alias_guid.ports_guid[port]. in get_low_record_time_index()
725 static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port, in get_next_record_to_update() argument
732 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in get_next_record_to_update()
733 record_index = get_low_record_time_index(dev, port, NULL); in get_next_record_to_update()
740 set_required_record(dev, port, rec, record_index); in get_next_record_to_update()
742 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in get_next_record_to_update()
758 struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); in alias_guid_work() local
767 ret = get_next_record_to_update(dev, sriov_alias_port->port, rec); in alias_guid_work()
773 set_guid_rec(&dev->ib_dev, rec); in alias_guid_work()
779 void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port) in mlx4_ib_init_alias_guid_work() argument
783 if (!mlx4_is_master(dev->dev)) in mlx4_ib_init_alias_guid_work()
785 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_init_alias_guid_work()
786 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_init_alias_guid_work()
787 if (!dev->sriov.is_going_down) { in mlx4_ib_init_alias_guid_work()
792 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port]. in mlx4_ib_init_alias_guid_work()
794 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, in mlx4_ib_init_alias_guid_work()
795 &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0); in mlx4_ib_init_alias_guid_work()
797 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_init_alias_guid_work()
798 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_init_alias_guid_work()
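
The hits above for mlx4_ib_init_alias_guid_work() show the same locking and re-queue pattern that also runs through set_guid_rec(), aliasguid_query_handler() and mlx4_ib_invalidate_all_guid_record(): take sriov.going_down_lock, then alias_guid.ag_work_lock, test is_going_down, cancel the per-port delayed work and queue it again, and unlock in reverse order. The following is a minimal sketch of that pattern only, assuming a reduced structure layout (sriov_state and port_guid_work are illustrative names, not the driver's types); it is not the mlx4 code itself.

/*
 * Hedged sketch of the lock-nesting and re-queue pattern visible in the
 * listing: going_down_lock is taken before ag_work_lock, is_going_down
 * gates any (re)scheduling, and the per-port delayed work is cancelled
 * before being queued again.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct port_guid_work {
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
};

struct sriov_state {
	spinlock_t going_down_lock;
	spinlock_t ag_work_lock;
	int is_going_down;
	struct port_guid_work ports_guid[2];	/* one slot per IB port */
};

static void init_alias_guid_work(struct sriov_state *sriov, int port)
{
	unsigned long flags, flags1;

	spin_lock_irqsave(&sriov->going_down_lock, flags);
	spin_lock_irqsave(&sriov->ag_work_lock, flags1);
	if (!sriov->is_going_down) {
		/* drop any pending delayed run so the work executes now */
		cancel_delayed_work(&sriov->ports_guid[port].alias_guid_work);
		queue_delayed_work(sriov->ports_guid[port].wq,
				   &sriov->ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&sriov->ag_work_lock, flags1);
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
}

Cancelling before queueing discards any pending delayed run so the work is re-armed with the new delay (here zero), and keeping the is_going_down test inside both locks avoids re-arming the work while teardown is in progress.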
801 void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) in mlx4_ib_destroy_alias_guid_service() argument
804 struct mlx4_ib_sriov *sriov = &dev->sriov; in mlx4_ib_destroy_alias_guid_service()
810 for (i = 0 ; i < dev->num_ports; i++) { in mlx4_ib_destroy_alias_guid_service()
811 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work); in mlx4_ib_destroy_alias_guid_service()
829 for (i = 0 ; i < dev->num_ports; i++) { in mlx4_ib_destroy_alias_guid_service()
830 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service()
831 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service()
833 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_destroy_alias_guid_service()
834 kfree(dev->sriov.alias_guid.sa_client); in mlx4_ib_destroy_alias_guid_service()
837 int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) in mlx4_ib_init_alias_guid_service() argument
844 if (!mlx4_is_master(dev->dev)) in mlx4_ib_init_alias_guid_service()
846 dev->sriov.alias_guid.sa_client = in mlx4_ib_init_alias_guid_service()
847 kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL); in mlx4_ib_init_alias_guid_service()
848 if (!dev->sriov.alias_guid.sa_client) in mlx4_ib_init_alias_guid_service()
851 ib_sa_register_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
853 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock); in mlx4_ib_init_alias_guid_service()
855 for (i = 1; i <= dev->num_ports; ++i) { in mlx4_ib_init_alias_guid_service()
856 if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) { in mlx4_ib_init_alias_guid_service()
862 for (i = 0 ; i < dev->num_ports; i++) { in mlx4_ib_init_alias_guid_service()
863 memset(&dev->sriov.alias_guid.ports_guid[i], 0, in mlx4_ib_init_alias_guid_service()
865 dev->sriov.alias_guid.ports_guid[i].state_flags |= in mlx4_ib_init_alias_guid_service()
869 memset(dev->sriov.alias_guid.ports_guid[i]. in mlx4_ib_init_alias_guid_service()
871 sizeof(dev->sriov.alias_guid.ports_guid[i]. in mlx4_ib_init_alias_guid_service()
874 INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list); in mlx4_ib_init_alias_guid_service()
878 mlx4_set_admin_guid(dev->dev, 0, j, i + 1); in mlx4_ib_init_alias_guid_service()
880 invalidate_guid_record(dev, i + 1, j); in mlx4_ib_init_alias_guid_service()
882 dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; in mlx4_ib_init_alias_guid_service()
883 dev->sriov.alias_guid.ports_guid[i].port = i; in mlx4_ib_init_alias_guid_service()
886 dev->sriov.alias_guid.ports_guid[i].wq = in mlx4_ib_init_alias_guid_service()
888 if (!dev->sriov.alias_guid.ports_guid[i].wq) { in mlx4_ib_init_alias_guid_service()
892 INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work, in mlx4_ib_init_alias_guid_service()
899 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_init_alias_guid_service()
900 dev->sriov.alias_guid.ports_guid[i].wq = NULL; in mlx4_ib_init_alias_guid_service()
904 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
905 kfree(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
906 dev->sriov.alias_guid.sa_client = NULL; in mlx4_ib_init_alias_guid_service()
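
The mlx4_ib_init_alias_guid_service() and mlx4_ib_destroy_alias_guid_service() hits outline a symmetric resource lifecycle: allocate and register an ib_sa_client, create one workqueue and one delayed work per port, and tear down by cancelling the delayed work, flushing and destroying each workqueue, then unregistering and freeing the SA client. Below is a minimal sketch of that lifecycle under simplified assumptions (NUM_PORTS, ports[], guid_port and alias_guid_work_fn() are illustrative, not the driver's symbols), so the create/teardown order is visible in one place.

/*
 * Hedged sketch of the setup/teardown symmetry shown by the init/destroy
 * hits above; not the driver code.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <rdma/ib_sa.h>

#define NUM_PORTS 2

struct guid_port {
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
};

static struct ib_sa_client *sa_client;
static struct guid_port ports[NUM_PORTS];

static void alias_guid_work_fn(struct work_struct *work)
{
	/* placeholder for the per-port GUID record query logic */
}

static int init_alias_guid_service(void)
{
	int i;

	sa_client = kzalloc(sizeof(*sa_client), GFP_KERNEL);
	if (!sa_client)
		return -ENOMEM;
	ib_sa_register_client(sa_client);

	for (i = 0; i < NUM_PORTS; i++) {
		/* one serialized queue per port keeps its GUID queries ordered */
		ports[i].wq = alloc_ordered_workqueue("alias_guid%d", 0, i + 1);
		if (!ports[i].wq)
			goto err;
		INIT_DELAYED_WORK(&ports[i].alias_guid_work, alias_guid_work_fn);
	}
	return 0;

err:
	/* unwind in reverse of the creation order */
	while (--i >= 0) {
		destroy_workqueue(ports[i].wq);
		ports[i].wq = NULL;
	}
	ib_sa_unregister_client(sa_client);
	kfree(sa_client);
	sa_client = NULL;
	return -ENOMEM;
}

static void destroy_alias_guid_service(void)
{
	int i;

	for (i = 0; i < NUM_PORTS; i++)
		cancel_delayed_work(&ports[i].alias_guid_work);
	for (i = 0; i < NUM_PORTS; i++) {
		flush_workqueue(ports[i].wq);
		destroy_workqueue(ports[i].wq);
	}
	ib_sa_unregister_client(sa_client);
	kfree(sa_client);
}

The sketch mirrors the ordering the listing shows: the SA client is registered before any work can be queued and is unregistered and freed only after every per-port workqueue has been cancelled, flushed and destroyed.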