@@ -6580,3 +6580,293 @@ mono_wrapper_caches_free (MonoWrapperCaches *cache)
 	free_hash (cache->thunk_invoke_cache);
 	free_hash (cache->unsafe_accessor_cache);
 }
+
+typedef enum {
+	SWIFT_EMPTY = 0,
+	SWIFT_OPAQUE,
+	SWIFT_INT64,
+	SWIFT_FLOAT,
+	SWIFT_DOUBLE,
+} SwiftPhysicalLoweringKind;
+
+static int get_swift_lowering_alignment (SwiftPhysicalLoweringKind kind) {
+	switch (kind) {
+	case SWIFT_INT64:
+	case SWIFT_DOUBLE:
+		return 8;
+	case SWIFT_FLOAT:
+		return 4;
+	default:
+		return 1;
+	}
+}
+
+static void set_lowering_range (guint8 *lowered_bytes, guint32 offset, guint32 size, SwiftPhysicalLoweringKind kind) {
+	bool force_opaque = false;
+
+	if (offset != ALIGN_TO (offset, get_swift_lowering_alignment (kind))) {
+		// If the start of the range is not aligned, we need to force the entire range to be opaque.
+		force_opaque = true;
+	}
+
+	// Check if any of the range is non-empty.
+	// If so, we need to force this range to be opaque,
+	// extending it to cover the existing tag's aligned range in addition to the requested range.
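+	// For example, if bytes [0, 8) are already tagged SWIFT_INT64 and a SWIFT_FLOAT is recorded
+	// at offset 4, the overlap widens the range to the existing tag's 8-byte alignment and all of bytes [0, 8) become SWIFT_OPAQUE.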
+
+	for (guint32 i = 0; i < size; ++i) {
+		SwiftPhysicalLoweringKind current = (SwiftPhysicalLoweringKind)lowered_bytes [offset + i];
+		if (current != SWIFT_EMPTY && current != kind) {
+			force_opaque = true;
+			offset = ALIGN_DOWN_TO (offset, get_swift_lowering_alignment (current));
+			size = ALIGN_TO (size + offset, get_swift_lowering_alignment (current)) - offset;
+			break;
+		}
+	}
+
+	if (force_opaque) {
+		kind = SWIFT_OPAQUE;
+	}
+
+	memset (lowered_bytes + offset, kind, size);
+}
+
+static void record_struct_field_physical_lowering (guint8 *lowered_bytes, MonoType *type, guint32 offset);
+
+static void record_inlinearray_struct_physical_lowering (guint8 *lowered_bytes, MonoClass *klass, guint32 offset) {
+	// Get the first field and record its physical lowering N times
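+	// For example, a four-element inline array of float records SWIFT_FLOAT for each 4-byte element in turn.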
6637
+ MonoClassField * field = mono_class_get_fields_internal (klass , NULL );
6638
+ MonoType * fieldType = field -> type ;
6639
+ for (int i = 0 ; i < m_class_inlinearray_value (klass ); ++ i ) {
6640
+ record_struct_field_physical_lowering (lowered_bytes , fieldType , offset + m_field_get_offset (field ) + i * mono_type_size (fieldType , NULL ));
6641
+ }
6642
+ }
+
+static void record_struct_physical_lowering (guint8 *lowered_bytes, MonoClass *klass, guint32 offset)
+{
+	if (m_class_is_inlinearray (klass)) {
+		record_inlinearray_struct_physical_lowering (lowered_bytes, klass, offset);
+		return;
+	}
+
+	// For each field, we need to record its physical lowering.
+	gpointer iter = NULL;
+	MonoClassField *field;
+	while ((field = mono_class_get_fields_internal (klass, &iter))) {
+		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
+			continue;
+		if (mono_field_is_deleted (field))
+			continue;
+
+		record_struct_field_physical_lowering (lowered_bytes, field->type, offset + m_field_get_offset (field));
+	}
+}
+
+static void record_struct_field_physical_lowering (guint8 *lowered_bytes, MonoType *type, guint32 offset) {
+	// Normalize pointer types to IntPtr and resolve generic classes.
+	// We don't need to care about specific pointer types at this ABI level.
+	if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) {
+		type = m_class_get_byval_arg (mono_defaults.int_class);
+	}
+	if (type->type == MONO_TYPE_VALUETYPE || (type->type == MONO_TYPE_GENERICINST && mono_type_generic_inst_is_valuetype (type))) {
+		// If a struct type is encountered, we need to record the physical lowering for each field of that struct recursively.
+		record_struct_physical_lowering (lowered_bytes, mono_class_from_mono_type_internal (type), offset);
+	} else {
+		SwiftPhysicalLoweringKind kind = SWIFT_OPAQUE;
+		// The only types that are non-opaque are 64-bit integers, floats, doubles, and vector types.
+		// We currently don't support vector types, so we'll only handle the first three.
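+		// For example, an int64 field is tagged SWIFT_INT64 and a double field SWIFT_DOUBLE,
+		// while a 4-byte int field is left as SWIFT_OPAQUE.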
+		if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8) {
+			kind = SWIFT_INT64;
+		}
+#if TARGET_SIZEOF_VOID_P == 8
+		else if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR
+			|| type->type == MONO_TYPE_I || type->type == MONO_TYPE_U) {
+			kind = SWIFT_INT64;
+		}
+#endif
+		else if (type->type == MONO_TYPE_R4) {
+			kind = SWIFT_FLOAT;
+		} else if (type->type == MONO_TYPE_R8) {
+			kind = SWIFT_DOUBLE;
+		}
+
+		set_lowering_range (lowered_bytes, offset, mono_type_size (type, NULL), kind);
+	}
+}
+
+SwiftPhysicalLowering
+mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout)
+{
+	// TODO: Add support for the native type layout.
+	g_assert (!native_layout);
+	SwiftPhysicalLowering lowering = { 0 };
+
+	// Normalize pointer types to IntPtr and resolve generic classes.
+	// We don't need to care about specific pointer types at this ABI level.
+	if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) {
+		type = m_class_get_byval_arg (mono_defaults.int_class);
+	}
+
+	// Non-value types are illegal at the interop boundary.
+	if (type->type == MONO_TYPE_GENERICINST) {
+		if (!mono_type_generic_inst_is_valuetype (type)) {
+			lowering.by_reference = TRUE;
+			return lowering;
+		}
+	} else if (type->type != MONO_TYPE_VALUETYPE && !mono_type_is_primitive (type)) {
+		lowering.by_reference = TRUE;
+		return lowering;
+	}
+
+	MonoClass *klass = mono_class_from_mono_type_internal (type);
+
+	// TODO: We currently don't support vector types, so we can say that the maximum size of a non-by_reference struct
+	// is 4 * PointerSize.
+	// Strictly, this is inaccurate in the case where a struct has a fully-empty 8 bytes of padding using explicit layout,
+	// but that's not possible in the Swift layout algorithm.
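+	// (4 * TARGET_SIZEOF_VOID_P is 32 bytes on a 64-bit target.)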
+
+	if (m_class_get_instance_size (klass) > 4 * TARGET_SIZEOF_VOID_P) {
+		lowering.by_reference = TRUE;
+		return lowering;
+	}
+
+	guint8 lowered_bytes [TARGET_SIZEOF_VOID_P * 4] = { 0 };
+
+	// Loop through all fields and get the physical lowering for each field
+	record_struct_physical_lowering (lowered_bytes, klass, 0);
+
+	struct _SwiftInterval {
+		guint32 start;
+		guint32 size;
+		SwiftPhysicalLoweringKind kind;
+	};
+
+	GArray *intervals = g_array_new (FALSE, TRUE, sizeof (struct _SwiftInterval));
+
+	// Now we'll build the intervals from the lowered_bytes array
+	int instance_size = m_class_get_instance_size (klass);
+	for (int i = 0; i < instance_size; ++i) {
+		// Don't create an interval for empty bytes
+		if (lowered_bytes [i] == SWIFT_EMPTY) {
+			continue;
+		}
+
+		SwiftPhysicalLoweringKind current = (SwiftPhysicalLoweringKind)lowered_bytes [i];
+
+		bool start_new_interval =
+			// We're at the start of the type
+			i == 0
+			// We're starting a new float (as we're aligned)
+			|| (i == ALIGN_TO (i, 4) && current == SWIFT_FLOAT)
+			// We're starting a new double or int64_t (as we're aligned)
+			|| (i == ALIGN_TO (i, 8) && (current == SWIFT_DOUBLE || current == SWIFT_INT64))
+			// We've changed interval types
+			|| current != lowered_bytes [i - 1];
+
+		if (start_new_interval) {
+			struct _SwiftInterval interval = { i, 1, current };
+			g_array_append_val (intervals, interval);
+		} else {
+			// Extend the current interval
+			(g_array_index (intervals, struct _SwiftInterval, intervals->len - 1)).size++;
+		}
+	}
+
+	// Merge opaque intervals that are in the same pointer-sized block
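+	// For example, two opaque intervals covering bytes [0, 2) and [4, 6) fall in the same 8-byte block
+	// and are merged into a single opaque interval covering bytes [0, 6).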
+	for (int i = 0; i < ((int)intervals->len) - 1; ++i) {
+		struct _SwiftInterval current = g_array_index (intervals, struct _SwiftInterval, i);
+		struct _SwiftInterval next = g_array_index (intervals, struct _SwiftInterval, i + 1);
+
+		if (current.kind == SWIFT_OPAQUE && next.kind == SWIFT_OPAQUE && current.start / TARGET_SIZEOF_VOID_P == next.start / TARGET_SIZEOF_VOID_P) {
+			// Update the interval stored in the array, not the local copy.
+			(g_array_index (intervals, struct _SwiftInterval, i)).size = next.start + next.size - current.start;
+			g_array_remove_index (intervals, i + 1);
+			i--;
+		}
+	}
+
+	// Now that we have the intervals, we can calculate the lowering
+	MonoTypeEnum lowered_types [4];
+	guint32 offsets [4];
+	guint32 num_lowered_types = 0;
+
+	for (int i = 0; i < intervals->len; ++i) {
+		if (num_lowered_types == 4) {
+			// We can't handle more than 4 fields
+			lowering.by_reference = TRUE;
+			g_array_free (intervals, TRUE);
+			return lowering;
+		}
+
+		struct _SwiftInterval interval = g_array_index (intervals, struct _SwiftInterval, i);
+
+		offsets [num_lowered_types] = interval.start;
+
+		switch (interval.kind) {
+		case SWIFT_INT64:
+			lowered_types [num_lowered_types++] = MONO_TYPE_I8;
+			break;
+		case SWIFT_FLOAT:
+			lowered_types [num_lowered_types++] = MONO_TYPE_R4;
+			break;
+		case SWIFT_DOUBLE:
+			lowered_types [num_lowered_types++] = MONO_TYPE_R8;
+			break;
+		case SWIFT_OPAQUE:
+		{
+			// We need to split the opaque ranges into integer parameters.
+			// As part of this splitting, we must ensure that we don't introduce alignment padding.
+			// This lowering algorithm should produce a lowered type sequence that would have the same padding for
+			// a naturally-aligned struct with the lowered fields as the original type has.
+			// This algorithm intends to split the opaque range into the least number of lowered elements that covers the entire range.
+			// The lowered range is allowed to extend past the end of the opaque range (including past the end of the struct),
+			// but not into the next non-empty interval.
+			// However, due to the properties of the lowering (the only non-8-byte elements of the lowering are 4-byte floats),
+			// we'll never encounter a scenario where we would need to account for a correctly-aligned
+			// opaque range of > 4 bytes that we must not pad to 8 bytes.
+
+			// As long as we need to fill more than 4 bytes and the sequence is currently 8-byte aligned, we'll split into 8-byte integers.
+			// If we have more than 2 bytes but no more than 4 and the sequence is 4-byte aligned, we'll use a 4-byte integer to represent the rest of the parameters.
+			// If we have 2 bytes and the sequence is 2-byte aligned, we'll use a 2-byte integer to represent the rest of the parameters.
+			// If we have 1 byte, we'll use a 1-byte integer to represent the rest of the parameters.
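+			// For example, a 12-byte opaque interval starting at offset 0 lowers to
+			// MONO_TYPE_I8 at offset 0 followed by MONO_TYPE_I4 at offset 8.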
+			guint32 opaque_interval_start = interval.start;
+			// The remaining size here may become negative, so use a signed type.
+			gint32 remaining_interval_size = (gint32)interval.size;
+			while (remaining_interval_size > 0) {
+				if (num_lowered_types == 4) {
+					// We can't handle more than 4 fields
+					lowering.by_reference = TRUE;
+					g_array_free (intervals, TRUE);
+					return lowering;
+				}
+
+				offsets [num_lowered_types] = opaque_interval_start;
+
+				if (remaining_interval_size > 4 && (opaque_interval_start % 8 == 0)) {
+					lowered_types [num_lowered_types] = MONO_TYPE_I8;
+					remaining_interval_size -= 8;
+					opaque_interval_start += 8;
+				} else if (remaining_interval_size > 2 && (opaque_interval_start % 4 == 0)) {
+					lowered_types [num_lowered_types] = MONO_TYPE_I4;
+					remaining_interval_size -= 4;
+					opaque_interval_start += 4;
+				} else if (remaining_interval_size > 1 && (opaque_interval_start % 2 == 0)) {
+					lowered_types [num_lowered_types] = MONO_TYPE_I2;
+					remaining_interval_size -= 2;
+					opaque_interval_start += 2;
+				} else {
+					lowered_types [num_lowered_types] = MONO_TYPE_U1;
+					remaining_interval_size -= 1;
+					opaque_interval_start += 1;
+				}
+
+				num_lowered_types++;
+			}
+		}
+		}
+	}
+
+	g_array_free (intervals, TRUE);
+
+	memcpy (lowering.lowered_elements, lowered_types, num_lowered_types * sizeof (MonoTypeEnum));
+	memcpy (lowering.offsets, offsets, num_lowered_types * sizeof (guint32));
+	lowering.num_lowered_elements = num_lowered_types;
+	lowering.by_reference = FALSE;
+
+	return lowering;
+}
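As an illustrative, hypothetical example of the resulting lowering: on a 64-bit target, a sequential-layout struct containing an 8-byte integer followed by a 4-byte float is tagged SWIFT_INT64 for its first 8 bytes and SWIFT_FLOAT for the next 4, so the expected result is two lowered elements, MONO_TYPE_I8 at offset 0 and MONO_TYPE_R4 at offset 8, rather than a by-reference pass. A sketch of a caller, with struct_type standing in for the MonoType* of such a struct:

	/* Hypothetical sketch; struct_type is assumed to describe an { int64, float } struct. */
	SwiftPhysicalLowering lowering = mono_marshal_get_swift_physical_lowering (struct_type, FALSE);
	g_assert (!lowering.by_reference);
	g_assert (lowering.num_lowered_elements == 2);
	/* Expected result: lowered_elements = { MONO_TYPE_I8, MONO_TYPE_R4 }, offsets = { 0, 8 }. */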