@@ -54,6 +54,42 @@ struct xen_gem_object {
 	struct sg_table *sgt;
 	/* map grant handles */
 	grant_handle_t *map_handles;
+	/*
+	 * This is used for synchronous object deletion, e.g.
+	 * when user space wants to know that the grefs are unmapped.
+	 */
+	struct kref refcount;
+	int wait_handle;
+};
+
+struct xen_wait_obj {
+	struct list_head list;
+	struct xen_gem_object *xen_obj;
+	struct completion completion;
+};
+
+struct xen_drv_info {
+	struct drm_device *drm_dev;
+
+	/*
+	 * For buffers created from the frontend's grant references,
+	 * synchronization between backend and frontend is needed on buffer
+	 * deletion, as the frontend expects us to unmap those references
+	 * after the XENDISPL_OP_DBUF_DESTROY response.
+	 * The rationale behind implementing our own wait handle:
+	 * - the dumb buffer handle cannot be used: when the PRIME buffer
+	 *   gets exported there are at least two handles, one for the
+	 *   backend and another for the importing application, so when
+	 *   the backend closes its handle while the other application
+	 *   still holds the buffer, there is no way for the backend to
+	 *   tell which buffer to wait for when calling xen_ioctl_wait_free
+	 * - flink cannot be used either, as it is gone by the time the
+	 *   DRM core calls .gem_free_object_unlocked
+	 */
+	struct list_head wait_obj_list;
+	struct idr idr;
+	spinlock_t idr_lock;
+	spinlock_t wait_list_lock;
 };
 
 static inline struct xen_gem_object *to_xen_gem_obj(
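For context, the flow this wait handle enables from user space could look like the sketch below. This is not part of the patch: it assumes the DRM_IOCTL_XEN_ZCOPY_DUMB_FROM_REFS and DRM_IOCTL_XEN_ZCOPY_DUMB_WAIT_FREE wrappers and the request structures from the driver's uapi header, and the helper name is hypothetical.

/*
 * Hypothetical user-space sketch (not part of this patch): create a
 * dumb buffer from the frontend's grant references, keep the returned
 * wait_handle, and later block until the driver has unmapped the grefs.
 */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>

int zcopy_create_then_wait_free(int fd,
		struct drm_xen_zcopy_dumb_from_refs *req)
{
	struct drm_xen_zcopy_dumb_wait_free wait;

	if (ioctl(fd, DRM_IOCTL_XEN_ZCOPY_DUMB_FROM_REFS, req) < 0)
		return -errno;

	/* ... export and use req->dumb.handle, then close it ... */

	memset(&wait, 0, sizeof(wait));
	wait.wait_handle = req->wait_handle;
	wait.wait_to_ms = 1000;	/* give the frontend one second */
	if (ioctl(fd, DRM_IOCTL_XEN_ZCOPY_DUMB_WAIT_FREE, &wait) < 0)
		return -errno;
	return 0;
}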
@@ -62,6 +98,113 @@ static inline struct xen_gem_object *to_xen_gem_obj(
 	return container_of(gem_obj, struct xen_gem_object, base);
 }
 
+static struct xen_wait_obj *xen_wait_obj_new(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	struct xen_wait_obj *wait_obj;
+
+	wait_obj = kzalloc(sizeof(*wait_obj), GFP_KERNEL);
+	if (!wait_obj)
+		return ERR_PTR(-ENOMEM);
+
+	init_completion(&wait_obj->completion);
+	wait_obj->xen_obj = xen_obj;
+	spin_lock(&drv_info->wait_list_lock);
+	list_add(&wait_obj->list, &drv_info->wait_obj_list);
+	spin_unlock(&drv_info->wait_list_lock);
+	return wait_obj;
+}
+
+static void xen_wait_obj_free(struct xen_drv_info *drv_info,
+		struct xen_wait_obj *wait_obj)
+{
+	struct xen_wait_obj *cur_wait_obj, *q;
+
+	spin_lock(&drv_info->wait_list_lock);
+	list_for_each_entry_safe(cur_wait_obj, q,
+			&drv_info->wait_obj_list, list) {
+		if (cur_wait_obj == wait_obj) {
+			list_del(&wait_obj->list);
+			kfree(wait_obj);
+			break;
+		}
+	}
+	spin_unlock(&drv_info->wait_list_lock);
+}
+
+static void xen_wait_obj_check_pending(struct xen_drv_info *drv_info)
+{
+	/*
+	 * This is intended to be called from .last_close, when no pending
+	 * wait objects should be on the list. Make sure we don't miss a
+	 * bug if that is not the case.
+	 */
+	if (!list_empty(&drv_info->wait_obj_list)) {
+		DRM_ERROR("Removing with pending wait objects!\n");
+		BUG();
+	}
+}
+
+static int xen_wait_obj_wait(struct xen_wait_obj *wait_obj,
+		uint32_t wait_to_ms)
+{
+	if (!wait_for_completion_timeout(&wait_obj->completion,
+			msecs_to_jiffies(wait_to_ms)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void xen_wait_obj_signal(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	struct xen_wait_obj *wait_obj, *q;
+
+	spin_lock(&drv_info->wait_list_lock);
+	list_for_each_entry_safe(wait_obj, q, &drv_info->wait_obj_list, list) {
+		if (wait_obj->xen_obj == xen_obj) {
+			DRM_DEBUG("Found xen_obj in the wait list, wake\n");
+			complete_all(&wait_obj->completion);
+		}
+	}
+	spin_unlock(&drv_info->wait_list_lock);
+}
+
+static int xen_wait_obj_handle_new(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	int ret;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&drv_info->idr_lock);
+	ret = idr_alloc(&drv_info->idr, xen_obj, 1, 0, GFP_NOWAIT);
+	spin_unlock(&drv_info->idr_lock);
+	idr_preload_end();
+	return ret;
+}
+
+static void xen_wait_obj_handle_free(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	spin_lock(&drv_info->idr_lock);
+	idr_remove(&drv_info->idr, xen_obj->wait_handle);
+	spin_unlock(&drv_info->idr_lock);
+}
+
+static struct xen_gem_object *xen_get_obj_by_wait_handle(
+		struct xen_drv_info *drv_info, int wait_handle)
+{
+	struct xen_gem_object *xen_obj;
+
+	spin_lock(&drv_info->idr_lock);
+	/* check if xen_obj still exists */
+	xen_obj = idr_find(&drv_info->idr, wait_handle);
+	if (xen_obj)
+		kref_get(&xen_obj->refcount);
+	spin_unlock(&drv_info->idr_lock);
+	return xen_obj;
+}
+
 #ifdef CONFIG_DRM_XEN_ZCOPY_CMA
 static int xen_alloc_ballooned_pages(struct device *dev,
 		struct xen_gem_object *xen_obj)
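Worth noting in xen_get_obj_by_wait_handle() above: the kref_get() happens under the same idr_lock that protects idr_find(), so a concurrent final kref_put() cannot free the object between lookup and pinning. Below is a minimal, self-contained sketch of that pattern; all names are hypothetical and it is not part of the patch.

/*
 * Minimal sketch of the "lookup and pin" pattern used by
 * xen_get_obj_by_wait_handle(): the kref_get() must happen under the
 * same lock that protects the idr, otherwise the final kref_put() on
 * another CPU could free the object right after idr_find() returns.
 */
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct pinned_obj {
	struct kref refcount;
};

static DEFINE_SPINLOCK(obj_idr_lock);
static DEFINE_IDR(obj_idr);

static void pinned_obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct pinned_obj, refcount));
}

static struct pinned_obj *pinned_obj_lookup(int handle)
{
	struct pinned_obj *obj;

	spin_lock(&obj_idr_lock);
	obj = idr_find(&obj_idr, handle);
	if (obj)
		kref_get(&obj->refcount);	/* pin while still locked */
	spin_unlock(&obj_idr_lock);
	return obj;	/* caller drops with kref_put(..., pinned_obj_release) */
}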
@@ -413,10 +556,22 @@ static int xen_gem_init_obj(struct xen_gem_object *xen_obj,
 	return 0;
 }
 
+static void xen_obj_release(struct kref *kref)
+{
+	struct xen_gem_object *xen_obj =
+		container_of(kref, struct xen_gem_object, refcount);
+	struct xen_drv_info *drv_info = xen_obj->base.dev->dev_private;
+
+	xen_wait_obj_signal(drv_info, xen_obj);
+	kfree(xen_obj);
+}
+
 static void xen_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+	struct xen_drv_info *drv_info = gem_obj->dev->dev_private;
 
+	DRM_DEBUG("Freeing dumb with handle %d\n", xen_obj->dumb_handle);
 	if (xen_obj->grefs) {
 		if (xen_obj->sgt) {
 			if (xen_obj->base.import_attach)
@@ -428,7 +583,9 @@ static void xen_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 	drm_gem_object_release(gem_obj);
-	kfree(xen_obj);
+
+	xen_wait_obj_handle_free(drv_info, xen_obj);
+	kref_put(&xen_obj->refcount, xen_obj_release);
 }
 
 #ifdef CONFIG_DRM_XEN_ZCOPY_WA_SWIOTLB
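To summarize the reference counting the two hunks above set up: the release callback can fire from either kref_put() site, and it signals waiters before freeing the memory, so a waiter observing the completion knows the grefs are already unmapped. A condensed sketch of the flow for a buffer with one waiter, derived from the code in this patch:

/*
 * Condensed reference flow for a buffer with one waiter (a summary of
 * this patch, not new code):
 *
 *   xen_do_ioctl_from_refs():
 *     kref_init()                          refcount = 1
 *   xen_ioctl_wait_free():
 *     xen_get_obj_by_wait_handle()         refcount = 2 (pinned)
 *     xen_wait_obj_new()                   waiter registered
 *     kref_put()                           refcount = 1
 *     xen_wait_obj_wait()                  blocks on the completion
 *   xen_gem_free_object():                 grefs unmapped here
 *     xen_wait_obj_handle_free()           wait handle removed from idr
 *     kref_put()                           refcount = 0
 *       -> xen_obj_release():              complete_all() wakes the
 *                                          waiter, then kfree(xen_obj)
 */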
@@ -495,6 +652,7 @@ struct drm_gem_object *xen_gem_prime_import_sg_table(struct drm_device *dev,
 	ret = xen_gem_init_obj(xen_obj, dev, attach->dmabuf->size);
 	if (ret < 0)
 		goto fail;
+	kref_init(&xen_obj->refcount);
 	xen_obj->sgt = sgt;
 	xen_obj->num_pages = DIV_ROUND_UP(attach->dmabuf->size, PAGE_SIZE);
 	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
@@ -510,12 +668,14 @@ static int xen_do_ioctl_from_refs(struct drm_device *dev,
 		struct drm_xen_zcopy_dumb_from_refs *req,
 		struct drm_file *file_priv)
 {
+	struct xen_drv_info *drv_info = dev->dev_private;
 	struct xen_gem_object *xen_obj;
 	int ret;
 
 	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
 	if (!xen_obj)
 		return -ENOMEM;
+	kref_init(&xen_obj->refcount);
 	xen_obj->num_pages = req->num_grefs;
 	xen_obj->otherend_id = req->otherend_id;
 	xen_obj->grefs = kcalloc(xen_obj->num_pages, sizeof(grant_ref_t),
@@ -538,6 +698,19 @@ static int xen_do_ioctl_from_refs(struct drm_device *dev,
 		goto fail;
 	/* return handle */
 	req->dumb.handle = xen_obj->dumb_handle;
+
+	/*
+	 * Get a user-visible handle for this GEM object. The wait object
+	 * is not allocated at this point; if need be, it will be allocated
+	 * at the time of the DRM_XEN_ZCOPY_DUMB_WAIT_FREE IOCTL.
+	 */
+	ret = xen_wait_obj_handle_new(drv_info, xen_obj);
+	if (ret < 0)
+		goto fail;
+
+	req->wait_handle = ret;
+	xen_obj->wait_handle = ret;
 	return 0;
 
 fail:
@@ -643,13 +816,61 @@ static int xen_ioctl_to_refs(struct drm_device *dev,
 	return ret;
 }
 
+static int xen_ioctl_wait_free(struct drm_device *dev,
+		void *data, struct drm_file *file_priv)
+{
+	struct drm_xen_zcopy_dumb_wait_free *req =
+		(struct drm_xen_zcopy_dumb_wait_free *)data;
+	struct xen_drv_info *drv_info = dev->dev_private;
+	struct xen_gem_object *xen_obj;
+	struct xen_wait_obj *wait_obj;
+	int wait_handle, ret;
+
+	wait_handle = req->wait_handle;
+	/*
+	 * Try to find the wait handle: if it is not found, then either
+	 * the handle has already been freed or it is invalid.
+	 */
+	xen_obj = xen_get_obj_by_wait_handle(drv_info, wait_handle);
+	if (!xen_obj)
+		return -ENOENT;
+
+	/*
+	 * xen_obj still exists and is now pinned by our reference, so
+	 * prepare to wait: allocate a wait object and add it to the wait
+	 * list, so we can find it on release.
+	 */
+	wait_obj = xen_wait_obj_new(drv_info, xen_obj);
+	/* put our reference and wait for xen_obj release to fire */
+	kref_put(&xen_obj->refcount, xen_obj_release);
+	ret = PTR_ERR_OR_ZERO(wait_obj);
+	if (ret < 0) {
+		DRM_ERROR("Failed to setup wait object, ret %d\n", ret);
+		return ret;
+	}
+
+	ret = xen_wait_obj_wait(wait_obj, req->wait_to_ms);
+	xen_wait_obj_free(drv_info, wait_obj);
+	return ret;
+}
+
+static void xen_lastclose(struct drm_device *dev)
+{
+	struct xen_drv_info *drv_info = dev->dev_private;
+
+	xen_wait_obj_check_pending(drv_info);
+}
+
 static const struct drm_ioctl_desc xen_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(XEN_ZCOPY_DUMB_FROM_REFS,
 		xen_ioctl_from_refs,
 		DRM_AUTH | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(XEN_ZCOPY_DUMB_TO_REFS,
 		xen_ioctl_to_refs,
 		DRM_AUTH | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(XEN_ZCOPY_DUMB_WAIT_FREE,
+		xen_ioctl_wait_free,
+		DRM_AUTH | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 };
 
 static const struct file_operations xen_fops = {
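On the caller side, the two error codes of the new ioctl have distinct meanings, which a hypothetical user-space helper (not part of this patch, assuming the uapi structures above) might handle like this:

/*
 * Hypothetical caller-side handling (user space, not part of this
 * patch): ENOENT means there is nothing to wait for, the buffer is
 * already gone; ETIMEDOUT means the frontend has not released its
 * grant references within wait_to_ms.
 */
#include <errno.h>
#include <sys/ioctl.h>

int zcopy_wait_free(int fd, int wait_handle, unsigned int to_ms)
{
	struct drm_xen_zcopy_dumb_wait_free wait = {
		.wait_handle = wait_handle,
		.wait_to_ms = to_ms,
	};

	if (ioctl(fd, DRM_IOCTL_XEN_ZCOPY_DUMB_WAIT_FREE, &wait) == 0)
		return 0;	/* grefs unmapped */
	if (errno == ENOENT)
		return 0;	/* buffer already freed: nothing to wait for */
	return -errno;		/* ETIMEDOUT or another error */
}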
@@ -661,6 +882,7 @@ static const struct file_operations xen_fops = {
 
 static struct drm_driver xen_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_PRIME,
+	.lastclose = xen_lastclose,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_get_sg_table = xen_gem_prime_get_sg_table,
@@ -680,42 +902,53 @@ static struct drm_driver xen_driver = {
 
 static int xen_remove(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev = platform_get_drvdata(pdev);
+	struct xen_drv_info *drv_info = platform_get_drvdata(pdev);
 
-	if (drm_dev) {
-		drm_dev_unregister(drm_dev);
-		drm_dev_unref(drm_dev);
+	if (drv_info && drv_info->drm_dev) {
+		drm_dev_unregister(drv_info->drm_dev);
+		drm_dev_unref(drv_info->drm_dev);
+		idr_destroy(&drv_info->idr);
 	}
 	return 0;
 }
 
 static int xen_probe(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev;
+	struct xen_drv_info *drv_info;
 	int ret;
 
 	DRM_INFO("Creating %s\n", xen_driver.desc);
+	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
+	if (!drv_info)
+		return -ENOMEM;
+
+	idr_init(&drv_info->idr);
+	spin_lock_init(&drv_info->idr_lock);
+	spin_lock_init(&drv_info->wait_list_lock);
+	INIT_LIST_HEAD(&drv_info->wait_obj_list);
 #ifdef CONFIG_DRM_XEN_ZCOPY_CMA
 	arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
 #endif
-	drm_dev = drm_dev_alloc(&xen_driver, &pdev->dev);
-	if (!drm_dev)
+	drv_info->drm_dev = drm_dev_alloc(&xen_driver, &pdev->dev);
+	if (!drv_info->drm_dev) {
+		kfree(drv_info);
 		return -ENOMEM;
+	}
 
-	ret = drm_dev_register(drm_dev, 0);
+	ret = drm_dev_register(drv_info->drm_dev, 0);
 	if (ret < 0)
 		goto fail;
 
-	platform_set_drvdata(pdev, drm_dev);
+	drv_info->drm_dev->dev_private = drv_info;
+	platform_set_drvdata(pdev, drv_info);
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		xen_driver.name, xen_driver.major,
 		xen_driver.minor, xen_driver.patchlevel,
-		xen_driver.date, drm_dev->primary->index);
+		xen_driver.date, drv_info->drm_dev->primary->index);
 	return 0;
 
 fail:
-	drm_dev_unref(drm_dev);
+	drm_dev_unref(drv_info->drm_dev);
+	kfree(drv_info);
 	return ret;
 }