pkgsrc-WIP-changes archive


rust: apply patch from rust issue #108426.



Module Name:	pkgsrc-wip
Committed By:	Havard Eidnes <he%NetBSD.org@localhost>
Pushed By:	he
Date:		Wed Mar 15 11:06:10 2023 +0100
Changeset:	c2eb21ffe5f6d8fd3e75ba809d9010ab557c381d

Modified Files:
	rust/distinfo
Added Files:
	rust/patches/patch-compiler_rustc__abi_src_layout.rs
	rust/patches/patch-compiler_rustc__codegen__ssa_src_mir_place.rs

Log Message:
rust: apply patch from rust issue #108426.

Specifically, this is related to
https://github.com/rust-lang/rust/issues/108426

and it was referenced from the issue I reported at
https://github.com/rust-lang/rust/issues/108117

With this change, a native build of rust on NetBSD/sparc64
10.0_BETA succeeds.
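
As background: both reverted commits touch rustc's "niche" optimization
for enum layout, where an impossible bit pattern of a field is reused to
encode a variant instead of storing a separate discriminant. A minimal,
self-contained sketch of that optimization, using Option<NonZeroU32>
rather than anything from the patched code:

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // `NonZeroU32` can never hold 0, so that bit pattern is a "niche"
        // rustc uses to encode `None` without a separate discriminant field.
        assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<NonZeroU32>());

        // With no niche available, the discriminant needs its own storage
        // (plus padding for alignment).
        assert!(size_of::<Option<u32>>() > size_of::<u32>());

        println!(
            "Option<NonZeroU32>: {} bytes, Option<u32>: {} bytes",
            size_of::<Option<NonZeroU32>>(),
            size_of::<Option<u32>>()
        );
    }

The patches below revert how such niches are placed during layout
(layout.rs) and how the niche tag is decoded back into a discriminant
(mir/place.rs).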

To see a diff of this commit:
https://wip.pkgsrc.org/cgi-bin/gitweb.cgi?p=pkgsrc-wip.git;a=commitdiff;h=c2eb21ffe5f6d8fd3e75ba809d9010ab557c381d

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

diffstat:
 rust/distinfo                                      |  2 +
 .../patch-compiler_rustc__abi_src_layout.rs        | 34 +++++++++++
 ...h-compiler_rustc__codegen__ssa_src_mir_place.rs | 68 ++++++++++++++++++++++
 3 files changed, 104 insertions(+)

diffs:
diff --git a/rust/distinfo b/rust/distinfo
index b490d0a665..e603301387 100644
--- a/rust/distinfo
+++ b/rust/distinfo
@@ -108,7 +108,9 @@ Size (rust-std-1.66.1-x86_64-unknown-netbsd.tar.xz) = 29173024 bytes
 BLAKE2s (rustc-1.67.1-src.tar.gz) = 1a69a94d93083a8a2ebf84854c87bc5b0b994c324602712527c0c77de5b173d4
 SHA512 (rustc-1.67.1-src.tar.gz) = d694ecd0f1d674bcc7fc511774705f33c2d9aaef7711c96d66a74cabd6dd66bff5c4fd1aa292c901e1f275e02fb17742b556b3ec0cc4836ae326c1d6339289c7
 Size (rustc-1.67.1-src.tar.gz) = 214449596 bytes
+SHA1 (patch-compiler_rustc__abi_src_layout.rs) = 0cc673d1849e5c180533f2d822aca2feaa4ad811
 SHA1 (patch-compiler_rustc__codegen__ssa_src_back_linker.rs) = 62819cf6db7d2e3e77d433fe883046c28fc20d91
+SHA1 (patch-compiler_rustc__codegen__ssa_src_mir_place.rs) = c2d268333736ec404e64118fe34b5425cc4be0dd
 SHA1 (patch-compiler_rustc__llvm_build.rs) = 190ff5e38c3a48bb4f11f4f8e5636bdeb36a0230
 SHA1 (patch-compiler_rustc__target_src_spec_aarch64__be__unknown__netbsd.rs) = c81d0d288699056ae45569f8d0a1c9bb08153ec4
 SHA1 (patch-compiler_rustc__target_src_spec_i586__unknown__netbsd.rs) = 866d01e7c1f3d8dd7d26f2fdc4360df273ed401f
diff --git a/rust/patches/patch-compiler_rustc__abi_src_layout.rs b/rust/patches/patch-compiler_rustc__abi_src_layout.rs
new file mode 100644
index 0000000000..2cee146121
--- /dev/null
+++ b/rust/patches/patch-compiler_rustc__abi_src_layout.rs
@@ -0,0 +1,34 @@
+https://github.com/rust-lang/rust/issues/108426
+
+It reverts:
+https://github.com/rust-lang/rust/commit/97d8a9bdd3b364406577d7368f5c5203a0f9740a
+https://github.com/rust-lang/rust/commit/53e4b9dd74c29cc9308b8d0f10facac70bb101a7
+
+diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
+index 9c2cf58efed..ff8ee7fe259 100644
+--- compiler/rustc_abi/src/layout.rs.orig
++++ compiler/rustc_abi/src/layout.rs
+@@ -112,21 +112,14 @@ fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+                             // especially with only one or two non-ZST fields.
+                             // Then place largest alignments first, largest niches within an alignment group last
+                             let f = &fields[x as usize];
+-                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+-                            (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
++                            (!f.is_zst(), cmp::Reverse(effective_field_align(f)))
+                         });
+                     }
+ 
+                     StructKind::Prefixed(..) => {
+                         // Sort in ascending alignment so that the layout stays optimal
+                         // regardless of the prefix.
+-                        // And put the largest niche in an alignment group at the end
+-                        // so it can be used as discriminant in jagged enums
+-                        optimizing.sort_by_key(|&x| {
+-                            let f = &fields[x as usize];
+-                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+-                            (effective_field_align(f), niche_size)
+-                        });
++                        optimizing.sort_by_key(|&x| effective_field_align(&fields[x as usize]));
+                     }
+                 }
+ 
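
The hunk above drops the niche size from the field-ordering key used by
univariant(): upstream sorts non-ZST fields by alignment and then puts the
field with the largest niche last within its alignment group, while the
revert sorts by ZST-ness and alignment alone. A rough sketch of the two
sort keys, using a made-up Field struct in place of rustc's LayoutS and
effective_field_align (illustrative only, not the compiler's real types):

    use std::cmp::Reverse;

    // Hypothetical stand-in for the per-field data univariant() looks at.
    struct Field {
        is_zst: bool,
        align: u64,       // stand-in for effective_field_align(f)
        niche_size: u128, // stand-in for f.largest_niche.map_or(0, |n| n.available(dl))
    }

    fn main() {
        let fields = vec![
            Field { is_zst: false, align: 4, niche_size: 255 },
            Field { is_zst: false, align: 4, niche_size: 1 },
            Field { is_zst: true,  align: 1, niche_size: 0 },
            Field { is_zst: false, align: 8, niche_size: 0 },
        ];
        let mut with_niche: Vec<usize> = (0..fields.len()).collect();
        let mut without_niche = with_niche.clone();

        // Upstream key (what the patch removes): ZSTs first, then largest
        // alignment first, largest niche last within an alignment group.
        with_niche.sort_by_key(|&x| {
            let f = &fields[x];
            (!f.is_zst, Reverse(f.align), f.niche_size)
        });

        // Reverted key: only ZST-ness and alignment decide the order.
        without_niche.sort_by_key(|&x| {
            let f = &fields[x];
            (!f.is_zst, Reverse(f.align))
        });

        println!("with niche in key:    {:?}", with_niche);    // [2, 3, 1, 0]
        println!("without niche in key: {:?}", without_niche); // [2, 3, 0, 1]
    }
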
diff --git a/rust/patches/patch-compiler_rustc__codegen__ssa_src_mir_place.rs b/rust/patches/patch-compiler_rustc__codegen__ssa_src_mir_place.rs
new file mode 100644
index 0000000000..998212d066
--- /dev/null
+++ b/rust/patches/patch-compiler_rustc__codegen__ssa_src_mir_place.rs
@@ -0,0 +1,68 @@
+https://github.com/rust-lang/rust/issues/108426
+
+It reverts:
+https://github.com/rust-lang/rust/commit/97d8a9bdd3b364406577d7368f5c5203a0f9740a
+https://github.com/rust-lang/rust/commit/53e4b9dd74c29cc9308b8d0f10facac70bb101a7
+
+diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
+index 9c2cf58efed..ff8ee7fe259 100644
+--- compiler/rustc_codegen_ssa/src/mir/place.rs.orig
++++ compiler/rustc_codegen_ssa/src/mir/place.rs
+@@ -309,14 +309,14 @@
+                 // In the algorithm above, we can change
+                 // cast(relative_tag) + niche_variants.start()
+                 // into
+-                // cast(tag + (niche_variants.start() - niche_start))
++                // cast(tag) + (niche_variants.start() - niche_start)
+                 // if either the casted type is no larger than the original
+                 // type, or if the niche values are contiguous (in either the
+                 // signed or unsigned sense).
+-                let can_incr = cast_smaller || niches_ule || niches_sle;
++                let can_incr_after_cast = cast_smaller || niches_ule || niches_sle;
+ 
+                 let data_for_boundary_niche = || -> Option<(IntPredicate, u128)> {
+-                    if !can_incr {
++                    if !can_incr_after_cast {
+                         None
+                     } else if niche_start == low_unsigned {
+                         Some((IntPredicate::IntULE, niche_end))
+@@ -353,33 +353,24 @@
+                     // The algorithm is now this:
+                     // is_niche = tag <= niche_end
+                     // discr = if is_niche {
+-                    //     cast(tag + (niche_variants.start() - niche_start))
++                    //     cast(tag) + (niche_variants.start() - niche_start)
+                     // } else {
+                     //     untagged_variant
+                     // }
+                     // (the first line may instead be tag >= niche_start,
+                     // and may be a signed or unsigned comparison)
+-                    // The arithmetic must be done before the cast, so we can
+-                    // have the correct wrapping behavior. See issue #104519 for
+-                    // the consequences of getting this wrong.
+                     let is_niche =
+                         bx.icmp(predicate, tag, bx.cx().const_uint_big(tag_llty, constant));
+-                    let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
+-                    let incr_tag = if delta == 0 {
+-                        tag
+-                    } else {
+-                        bx.add(tag, bx.cx().const_uint_big(tag_llty, delta))
+-                    };
+-
+                     let cast_tag = if cast_smaller {
+-                        bx.intcast(incr_tag, cast_to, false)
++                        bx.intcast(tag, cast_to, false)
+                     } else if niches_ule {
+-                        bx.zext(incr_tag, cast_to)
++                        bx.zext(tag, cast_to)
+                     } else {
+-                        bx.sext(incr_tag, cast_to)
++                        bx.sext(tag, cast_to)
+                     };
+ 
+-                    (is_niche, cast_tag, 0)
++                    let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
++                    (is_niche, cast_tag, delta)
+                 } else {
+                     // The special cases don't apply, so we'll have to go with
+                     // the general algorithm.
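
The comments removed in this hunk explain why upstream moved the delta
addition before the cast (see rust issue #104519): a wrapping add in the
narrow tag type and the same add performed after widening do not in general
agree. A toy illustration of that non-commutativity, with made-up values
and types rather than the compiler's actual code path:

    fn main() {
        let tag: u8 = 250;  // hypothetical niche tag value
        let delta: u8 = 10; // hypothetical niche_variants.start() - niche_start

        // "Arithmetic before the cast": the add wraps in the 8-bit tag type.
        let add_then_cast = u32::from(tag.wrapping_add(delta)); // (250 + 10) mod 256 = 4

        // "Cast before the arithmetic": no wrap occurs in the wider type.
        let cast_then_add = u32::from(tag) + u32::from(delta);  // 250 + 10 = 260

        assert_eq!(add_then_cast, 4);
        assert_eq!(cast_then_add, 260);
        println!("add-then-cast = {add_then_cast}, cast-then-add = {cast_then_add}");
    }

The patched code returns delta unchanged and lets it be applied after the
cast, whereas upstream 1.67 folds it into the tag before casting; the
pkgsrc patch restores the older behavior so the sparc64 build succeeds.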

