@@ -515,18 +515,22 @@ static const struct file_operations limit_fops = {
 
 static bool someone_adding(struct mlx5_mkey_cache *cache)
 {
-	unsigned int i;
-
-	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
-		struct mlx5_cache_ent *ent = &cache->ent[i];
-		bool ret;
+	struct mlx5_cache_ent *ent;
+	struct rb_node *node;
+	bool ret;
 
+	mutex_lock(&cache->rb_lock);
+	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
+		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
 		ret = ent->stored < ent->limit;
 		xa_unlock_irq(&ent->mkeys);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&cache->rb_lock);
 			return true;
+		}
 	}
+	mutex_unlock(&cache->rb_lock);
 	return false;
 }
 
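The iteration above assumes the cache and entry structures gained rb-tree members in mlx5_ib.h; that header change is outside this excerpt. Below is a hedged sketch of the fields this hunk relies on, inferred only from the members referenced in the diff (rb_root/rb_lock/fs_root on the cache, node/mkeys/stored/limit/order on the entry), not the verbatim header:

/*
 * Sketch only -- inferred from the fields used in this patch; unrelated
 * members and exact types may differ from the real mlx5_ib.h.
 */
struct mlx5_cache_ent {
	struct rb_node		node;	/* links the entry into cache->rb_root */
	struct xarray		mkeys;	/* stored mkeys, guarded by xa_lock_irq() */
	unsigned long		stored;	/* exposed via debugfs_create_ulong("cur") */
	u32			limit;
	unsigned int		order;	/* rb-tree sort key */
	struct delayed_work	dwork;
	struct mlx5_ib_dev	*dev;
	/* ... */
};

struct mlx5_mkey_cache {
	struct rb_root		rb_root;	/* entries sorted by ent->order */
	struct mutex		rb_lock;	/* protects tree topology */
	struct dentry		*fs_root;	/* debugfs dir (renamed from "root") */
	struct workqueue_struct	*wq;
	/* ... */
};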
@@ -637,6 +641,59 @@ static void delayed_cache_work_func(struct work_struct *work)
 	__cache_work_func(ent);
 }
 
+static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
+				 struct mlx5_cache_ent *ent)
+{
+	struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL;
+	struct mlx5_cache_ent *cur;
+
+	mutex_lock(&cache->rb_lock);
+	/* Figure out where to put new node */
+	while (*new) {
+		cur = rb_entry(*new, struct mlx5_cache_ent, node);
+		parent = *new;
+		if (ent->order < cur->order)
+			new = &((*new)->rb_left);
+		if (ent->order > cur->order)
+			new = &((*new)->rb_right);
+		if (ent->order == cur->order) {
+			mutex_unlock(&cache->rb_lock);
+			return -EEXIST;
+		}
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&ent->node, parent, new);
+	rb_insert_color(&ent->node, &cache->rb_root);
+
+	mutex_unlock(&cache->rb_lock);
+	return 0;
+}
+
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+							unsigned int order)
+{
+	struct rb_node *node = dev->cache.rb_root.rb_node;
+	struct mlx5_cache_ent *cur, *smallest = NULL;
+
+	/*
+	 * Find the smallest ent with order >= requested_order.
+	 */
+	while (node) {
+		cur = rb_entry(node, struct mlx5_cache_ent, node);
+		if (cur->order > order) {
+			smallest = cur;
+			node = node->rb_left;
+		}
+		if (cur->order < order)
+			node = node->rb_right;
+		if (cur->order == order)
+			return cur;
+	}
+
+	return smallest;
+}
+
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 				       struct mlx5_cache_ent *ent,
 				       int access_flags)
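The new mkey_cache_ent_from_order() above is a lower-bound search: it returns the exact-order entry when one exists, otherwise the smallest entry whose order exceeds the request (or NULL if none). A minimal userspace sketch of the same walk, using hypothetical toy types rather than the kernel structures:

/* Toy illustration of the lower-bound lookup; not the kernel code. */
#include <stdio.h>

struct toy_ent {
	unsigned int order;
	struct toy_ent *left, *right;
};

static struct toy_ent *lookup_ge(struct toy_ent *node, unsigned int order)
{
	struct toy_ent *smallest = NULL;

	while (node) {
		if (node->order > order) {
			smallest = node;	/* candidate; look for a smaller one on the left */
			node = node->left;
		} else if (node->order < order) {
			node = node->right;
		} else {
			return node;		/* exact match */
		}
	}
	return smallest;	/* smallest entry with order >= requested, or NULL */
}

int main(void)
{
	/* Hand-built tree with orders 2, 5, 8 (5 at the root). */
	struct toy_ent n2 = { 2, NULL, NULL };
	struct toy_ent n8 = { 8, NULL, NULL };
	struct toy_ent n5 = { 5, &n2, &n8 };
	struct toy_ent *hit = lookup_ge(&n5, 4);

	printf("requested order 4 -> entry order %u\n", hit ? hit->order : 0);
	return 0;
}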
@@ -677,10 +734,16 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 	return mr;
 }
 
-static void clean_keys(struct mlx5_ib_dev *dev, int c)
+struct mlx5_ib_mr *mlx5_mr_cache_alloc_order(struct mlx5_ib_dev *dev,
+					     u32 order, int access_flags)
+{
+	struct mlx5_cache_ent *ent = mkey_cache_ent_from_order(dev, order);
+
+	return mlx5_mr_cache_alloc(dev, ent, access_flags);
+}
+
+static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
 {
-	struct mlx5_mkey_cache *cache = &dev->cache;
-	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u32 mkey;
 
 	cancel_delayed_work(&ent->dwork);
@@ -699,8 +762,8 @@ static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
 
-	debugfs_remove_recursive(dev->cache.root);
-	dev->cache.root = NULL;
+	debugfs_remove_recursive(dev->cache.fs_root);
+	dev->cache.fs_root = NULL;
 }
 
 static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
@@ -713,12 +776,13 @@ static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
 	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
 
-	cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
+	dir = mlx5_debugfs_get_dev_root(dev->mdev);
+	cache->fs_root = debugfs_create_dir("mr_cache", dir);
 
 	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
-		ent = &cache->ent[i];
+		ent = mkey_cache_ent_from_order(dev, i);
 		sprintf(ent->name, "%d", ent->order);
-		dir = debugfs_create_dir(ent->name, cache->root);
+		dir = debugfs_create_dir(ent->name, cache->fs_root);
 		debugfs_create_file("size", 0600, dir, ent, &size_fops);
 		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
 		debugfs_create_ulong("cur", 0400, dir, &ent->stored);
@@ -733,13 +797,39 @@ static void delay_time_func(struct timer_list *t)
 	WRITE_ONCE(dev->fill_delay, 0);
 }
 
+struct mlx5_cache_ent *mlx5r_cache_create_ent(struct mlx5_ib_dev *dev,
+					      int order)
+{
+	struct mlx5_cache_ent *ent;
+	int ret;
+
+	ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+	if (!ent)
+		return ERR_PTR(-ENOMEM);
+
+	xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
+	ent->order = order;
+	ent->dev = dev;
+
+	INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
+
+	ret = mlx5_cache_ent_insert(&dev->cache, ent);
+	if (ret) {
+		kfree(ent);
+		return ERR_PTR(ret);
+	}
+	return ent;
+}
+
 int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int i;
 
 	mutex_init(&dev->slow_path_mutex);
+	mutex_init(&dev->cache.rb_lock);
+	dev->cache.rb_root = RB_ROOT;
 	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
 	if (!cache->wq) {
 		mlx5_ib_warn(dev, "failed to create work queue\n");
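mlx5r_cache_create_ent() allocates an entry, initializes its xarray and delayed work, and links it into the tree, returning ERR_PTR() codes on failure (-ENOMEM from allocation, -EEXIST from a duplicate order). A hedged sketch of how a caller would typically consume that convention (hypothetical caller, not part of this patch):

/* Hypothetical caller -- illustrates the ERR_PTR()/IS_ERR() convention only. */
static int add_cache_order(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_cache_ent *ent;

	ent = mlx5r_cache_create_ent(dev, order);
	if (IS_ERR(ent))
		return PTR_ERR(ent);	/* -ENOMEM or -EEXIST */

	/* entry is now reachable via dev->cache.rb_root */
	return 0;
}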
@@ -749,13 +839,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
 	timer_setup(&dev->delay_timer, delay_time_func, 0);
 	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
-		ent = &cache->ent[i];
-		xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
-		ent->order = i + 2;
-		ent->dev = dev;
-		ent->limit = 0;
-
-		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
+		ent = mlx5r_cache_create_ent(dev, i);
 
 		if (i > MKEY_CACHE_LAST_STD_ENTRY) {
 			mlx5_odp_init_mkey_cache_entry(ent);
@@ -785,14 +869,16 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 
 int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 {
-	unsigned int i;
+	struct rb_root *root = &dev->cache.rb_root;
+	struct mlx5_cache_ent *ent;
+	struct rb_node *node;
 
 	if (!dev->cache.wq)
 		return 0;
 
-	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
-		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
-
+	mutex_lock(&dev->cache.rb_lock);
+	for (node = rb_first(root); node; node = rb_next(node)) {
+		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
 		ent->disabled = true;
 		xa_unlock_irq(&ent->mkeys);
@@ -802,8 +888,15 @@ int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
-	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
-		clean_keys(dev, i);
+	node = rb_first(root);
+	while (node) {
+		ent = rb_entry(node, struct mlx5_cache_ent, node);
+		node = rb_next(node);
+		clean_keys(dev, ent);
+		rb_erase(&ent->node, root);
+		kfree(ent);
+	}
+	mutex_unlock(&dev->cache.rb_lock);
 
 	destroy_workqueue(dev->cache.wq);
 	del_timer_sync(&dev->delay_timer);
@@ -876,19 +969,6 @@ static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
-							unsigned int order)
-{
-	struct mlx5_mkey_cache *cache = &dev->cache;
-
-	if (order < cache->ent[0].order)
-		return &cache->ent[0];
-	order = order - cache->ent[0].order;
-	if (order > MKEY_CACHE_LAST_STD_ENTRY)
-		return NULL;
-	return &cache->ent[order];
-}
-
 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			  u64 length, int access_flags, u64 iova)
 {
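The teardown hunk above frees entries while walking the tree, so it fetches rb_next() before rb_erase()/kfree() on the current node; that ordering is what keeps the iteration valid. A generic kernel-style sketch of the same pattern, with a hypothetical entry type and assuming <linux/rbtree.h> and <linux/slab.h> (not standalone code):

/* Hypothetical type; only the erase-while-iterating order matters here. */
struct my_ent {
	struct rb_node node;
	/* ... */
};

static void drain_tree(struct rb_root *root)
{
	struct rb_node *node = rb_first(root);

	while (node) {
		struct my_ent *ent = rb_entry(node, struct my_ent, node);

		node = rb_next(node);		/* step forward first */
		rb_erase(&ent->node, root);	/* then unlink ... */
		kfree(ent);			/* ... and free */
	}
}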