pkgsrc-Changes archive


CVS commit: pkgsrc/graphics/librsvg



Module Name:    pkgsrc
Committed By:   he
Date:           Sat Feb 15 23:41:47 UTC 2025

Modified Files:
        pkgsrc/graphics/librsvg: distinfo
Added Files:
        pkgsrc/graphics/librsvg/patches:
            patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs
            patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs
            patch-.._vendor_half-2.3.1_src_binary16_arch.rs
            patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs
            patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs
            patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs
            patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs
            patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs
            patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs
            patch-.._vendor_memchr-2.7.1_src_memchr.rs
            patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs
            patch-.._vendor_memchr-2.7.1_src_vector.rs
            patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs
            patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs
            patch-.._vendor_memchr-2.7.4_src_memchr.rs
            patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs
            patch-.._vendor_memchr-2.7.4_src_vector.rs
            patch-.._vendor_wide-0.7.15_src_f32x4__.rs
            patch-.._vendor_wide-0.7.15_src_f64x2__.rs
            patch-.._vendor_wide-0.7.15_src_i16x8__.rs
            patch-.._vendor_wide-0.7.15_src_i32x4__.rs
            patch-.._vendor_wide-0.7.15_src_i64x2__.rs
            patch-.._vendor_wide-0.7.15_src_i8x16__.rs
            patch-.._vendor_wide-0.7.15_src_u16x8__.rs
            patch-.._vendor_wide-0.7.15_src_u32x4__.rs
            patch-.._vendor_wide-0.7.15_src_u64x2__.rs
            patch-.._vendor_wide-0.7.15_src_u8x16__.rs
            patch-.._vendor_wide-0.7.26_src_f32x4__.rs
            patch-.._vendor_wide-0.7.26_src_f64x2__.rs
            patch-.._vendor_wide-0.7.26_src_i16x8__.rs
            patch-.._vendor_wide-0.7.26_src_i32x4__.rs
            patch-.._vendor_wide-0.7.26_src_i64x2__.rs
            patch-.._vendor_wide-0.7.26_src_i8x16__.rs
            patch-.._vendor_wide-0.7.26_src_u16x8__.rs
            patch-.._vendor_wide-0.7.26_src_u32x4__.rs
            patch-.._vendor_wide-0.7.26_src_u64x2__.rs
            patch-.._vendor_wide-0.7.26_src_u8x16__.rs
            patch-.._vendor_zerocopy-0.7.35_src_lib.rs
            patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs
            patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs
            patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs

Log Message:
graphics/librsvg: add patches so that this builds on aarch64_eb.

This basically consists of adding a target_endian = "little" condition
to the conditional-compilation attributes in various places.
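
For reference, the pattern applied throughout is roughly the following
(a minimal sketch, not copied verbatim from any of the patched crates):
an existing #[cfg(target_arch = "aarch64")] gate is tightened so the
NEON code path is only compiled on little-endian aarch64, and the
portable fallback is selected otherwise, e.g. on aarch64_eb.

    // Illustrative only; find_byte is a hypothetical function name.
    // Before the patches the gate was just #[cfg(target_arch = "aarch64")].
    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
    fn find_byte(haystack: &[u8], needle: u8) -> Option<usize> {
        // The NEON/SIMD implementation would live here.
        haystack.iter().position(|&b| b == needle)
    }

    #[cfg(not(all(target_arch = "aarch64", target_endian = "little")))]
    fn find_byte(haystack: &[u8], needle: u8) -> Option<usize> {
        // Portable scalar fallback, used on big-endian aarch64.
        haystack.iter().position(|&b| b == needle)
    }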


To generate a diff of this commit:
cvs rdiff -u -r1.66 -r1.67 pkgsrc/graphics/librsvg/distinfo
cvs rdiff -u -r0 -r1.1 \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_half-2.3.1_src_binary16_arch.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memchr.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_vector.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memchr.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_vector.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f32x4__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f64x2__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i16x8__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i32x4__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i64x2__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i8x16__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u16x8__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u32x4__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u64x2__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u8x16__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f32x4__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f64x2__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i16x8__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i32x4__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i64x2__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i8x16__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u16x8__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u32x4__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u64x2__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u8x16__.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_zerocopy-0.7.35_src_lib.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs \
    pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: pkgsrc/graphics/librsvg/distinfo
diff -u pkgsrc/graphics/librsvg/distinfo:1.66 pkgsrc/graphics/librsvg/distinfo:1.67
--- pkgsrc/graphics/librsvg/distinfo:1.66       Fri Dec 27 08:47:57 2024
+++ pkgsrc/graphics/librsvg/distinfo    Sat Feb 15 23:41:45 2025
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.66 2024/12/27 08:47:57 wiz Exp $
+$NetBSD: distinfo,v 1.67 2025/02/15 23:41:45 he Exp $
 
 BLAKE2s (adler-1.0.2.crate) = f263779d752af886455fc0f42c997893fb1a09edcf1bd2980605251c2e3602aa
 SHA512 (adler-1.0.2.crate) = 7ab190d31890fc05b0b55d8e2c6527a505e06793d5496be0b3831e0513412f9ba97f8148f6f68ed0770fa9cd980a5092d885e058becf1d5506b7c74b82674aa1
@@ -879,4 +879,45 @@ Size (zune-core-0.4.12.crate) = 17355 by
 BLAKE2s (zune-jpeg-0.4.13.crate) = c73611b98918c5fb627ae71e36c4386b34e68222cb8467abbb1c0c8fba0ce0dd
 SHA512 (zune-jpeg-0.4.13.crate) = ebd349cfee5756fff64cbfc9a59bfd1cf262453ec74c6225e058d4c09398e9c7086f340d6e5215dd72fa32d29bf0fc86e64cf42038ea1b0c1baa67097a214875
 Size (zune-jpeg-0.4.13.crate) = 62999 bytes
+SHA1 (patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs) = 5f1c2ca0b4cb0e7c062866ace876ba3137b4b1e2
+SHA1 (patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs) = d3a5394e861eb3035a96441db00e2f8d3d2e0dda
 SHA1 (patch-.._vendor_cfg-expr-0.15.8_src_targets_builtins.rs) = d8de3edfff043b6cd1cc314c62c87adcc5beddbd
+SHA1 (patch-.._vendor_half-2.3.1_src_binary16_arch.rs) = 8bb46b6fae0b5a445aa7f7446c0387df10a8f547
+SHA1 (patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs) = 355fa939101cdd077b1cae4a7a3624bb9d818690
+SHA1 (patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs) = 69faa462994ff387f9e0c625e24762bbe9bdc03c
+SHA1 (patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs) = d967e6918dccf3723efa78366a6713ad0af33536
+SHA1 (patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs) = d98ef31134fd94d0068dd5bd1602d3de6383ab75
+SHA1 (patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs) = 688ec0f8cd452f9c774f44d5e106b8e4cecc54f8
+SHA1 (patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs) = 50ed6fca6e633814d4e24ec28031e8427fc0c304
+SHA1 (patch-.._vendor_memchr-2.7.1_src_memchr.rs) = 6bf0a24fb9e5744c72d0827b037d06b8ef22f797
+SHA1 (patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs) = a1702d71578d51305161a14ae3350911a5ad1aa3
+SHA1 (patch-.._vendor_memchr-2.7.1_src_vector.rs) = 27336f42ff99991d35edae63fb78ad40a658ca43
+SHA1 (patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs) = 7539a89e29d600a732b04f84377f76e47cf0dfd1
+SHA1 (patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs) = 690e22e915ef8ad6c3381ebdc10574fae5316f4f
+SHA1 (patch-.._vendor_memchr-2.7.4_src_memchr.rs) = 3c7b0002b847ce44e5f35cd3a9089b9983017023
+SHA1 (patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs) = 862bfd9a4dee057a690f8d234e1ae893ae173b53
+SHA1 (patch-.._vendor_memchr-2.7.4_src_vector.rs) = 30577c24ed0990a622bd8bf7439803ee4c5613ba
+SHA1 (patch-.._vendor_wide-0.7.15_src_f32x4__.rs) = 7328f539234a9718dd081d344873b509b2c1c997
+SHA1 (patch-.._vendor_wide-0.7.15_src_f64x2__.rs) = 0836c2099a5eb710099197a010a505af0b3e53e1
+SHA1 (patch-.._vendor_wide-0.7.15_src_i16x8__.rs) = 55d892609a14a6d6c5abbce7cf0f59247d2fc1f7
+SHA1 (patch-.._vendor_wide-0.7.15_src_i32x4__.rs) = eeefa64ef2e6bd523da3a820b94be2546571d025
+SHA1 (patch-.._vendor_wide-0.7.15_src_i64x2__.rs) = dab1fd177bd6495e918c1b89fbc9510ebf9dda87
+SHA1 (patch-.._vendor_wide-0.7.15_src_i8x16__.rs) = b8e1eb04cefc648c018ce1fbb02e1a0c07c6ebfd
+SHA1 (patch-.._vendor_wide-0.7.15_src_u16x8__.rs) = 0722b258bec04107a45e2adc812e598fa112cc48
+SHA1 (patch-.._vendor_wide-0.7.15_src_u32x4__.rs) = 3892268b42ea73be15ed30695feae5c8b33eb6e1
+SHA1 (patch-.._vendor_wide-0.7.15_src_u64x2__.rs) = 5ad16fe4d38f89c9f9851432723946fe43e0380f
+SHA1 (patch-.._vendor_wide-0.7.15_src_u8x16__.rs) = 422df5b1df2f8a00c1680be712a1e69f1022000c
+SHA1 (patch-.._vendor_wide-0.7.26_src_f32x4__.rs) = b3d6e78eb47487a1c520d154d073895663100fd9
+SHA1 (patch-.._vendor_wide-0.7.26_src_f64x2__.rs) = 357e24389fe87a7ff01bf90bac946a8a169c1678
+SHA1 (patch-.._vendor_wide-0.7.26_src_i16x8__.rs) = df321ccbaba96ba2dd5870280118dd8a55c5d683
+SHA1 (patch-.._vendor_wide-0.7.26_src_i32x4__.rs) = ceef1e99584386988a97ced186de5949b7efa8f3
+SHA1 (patch-.._vendor_wide-0.7.26_src_i64x2__.rs) = d9336f2a09aa23360c9676af68da6f03120339ba
+SHA1 (patch-.._vendor_wide-0.7.26_src_i8x16__.rs) = ce57a82f50af120745a652806ff7e17611b5074e
+SHA1 (patch-.._vendor_wide-0.7.26_src_u16x8__.rs) = 1e01106f0e43657c3bb93aa2aa29011960e438ae
+SHA1 (patch-.._vendor_wide-0.7.26_src_u32x4__.rs) = 46b922c71afc105ea1c73b3cffddd2f0ef5e4983
+SHA1 (patch-.._vendor_wide-0.7.26_src_u64x2__.rs) = 04aecc087292be4fad0b323a56d66009f468cf11
+SHA1 (patch-.._vendor_wide-0.7.26_src_u8x16__.rs) = 0589117bb132a0e0ec516e1e288181e6c4901255
+SHA1 (patch-.._vendor_zerocopy-0.7.35_src_lib.rs) = ef8e7e6151bdfd9e05167e6b49c74fa9df8cee22
+SHA1 (patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs) = ccd717b06bd8de69366bf2f66b8250565db5e5f4
+SHA1 (patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs) = f5a1b6a8152d78bac04f885622768185d8fc0089
+SHA1 (patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs) = c55fed2cbbbc4f26657c16cd193e0f191cc34487

Added files:

Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs      Sat Feb 15 23:41:45 2025
@@ -0,0 +1,33 @@
+$NetBSD: patch-.._vendor_aho-corasick-1.1.2_src_packed_teddy_builder.rs,v 1.1 2025/02/15 23:41:45 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/aho-corasick-1.1.2/src/packed/teddy/builder.rs.orig      2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/aho-corasick-1.1.2/src/packed/teddy/builder.rs
+@@ -230,7 +230,7 @@ impl Builder {
+                 }
+             }
+         }
+-        #[cfg(target_arch = "aarch64")]
++        #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+         {
+             use self::aarch64::SlimNeon;
+ 
+@@ -301,7 +301,7 @@ impl Builder {
+         }
+         #[cfg(not(any(
+             all(target_arch = "x86_64", target_feature = "sse2"),
+-            target_arch = "aarch64"
++            all(target_arch = "aarch64", target_endian = "little")
+         )))]
+         {
+             None
+@@ -705,7 +705,7 @@ mod x86_64 {
+     }
+ }
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ mod aarch64 {
+     use core::arch::aarch64::uint8x16_t;
+ 
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs     Sat Feb 15 23:41:45 2025
@@ -0,0 +1,28 @@
+$NetBSD: patch-.._vendor_aho-corasick-1.1.2_src_packed_vector.rs,v 1.1 2025/02/15 23:41:45 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/aho-corasick-1.1.2/src/packed/vector.rs.orig     2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/aho-corasick-1.1.2/src/packed/vector.rs
+@@ -595,7 +595,7 @@ mod x86_64_avx2 {
+     }
+ }
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ mod aarch64_neon {
+     use core::arch::aarch64::*;
+ 
+@@ -1466,7 +1466,11 @@ mod tests_x86_64_avx2 {
+     }
+ }
+ 
+-#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
++#[cfg(all(test,
++          target_arch = "aarch64",
++          target_feature = "neon",
++          target_endian = "little"
++))]
+ mod tests_aarch64_neon {
+     use core::arch::aarch64::*;
+ 
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_half-2.3.1_src_binary16_arch.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_half-2.3.1_src_binary16_arch.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_half-2.3.1_src_binary16_arch.rs     Sat Feb 15 23:41:45 2025
@@ -0,0 +1,47 @@
+$NetBSD: patch-.._vendor_half-2.3.1_src_binary16_arch.rs,v 1.1 2025/02/15 23:41:45 he Exp $
+
+Try to use neon only on little-endian aarch64.
+
+--- ../vendor/half-2.3.1/src/binary16/arch.rs.orig     2024-10-02 18:19:08.769858146 +0000
++++ ../vendor/half-2.3.1/src/binary16/arch.rs
+@@ -5,7 +5,7 @@ use core::mem;
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ mod x86;
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ mod aarch64;
+ 
+ macro_rules! convert_fn {
+@@ -22,6 +22,7 @@ macro_rules! convert_fn {
+             }
+             else if #[cfg(all(
+                 target_arch = "aarch64",
++                target_endian = "little",
+                 target_feature = "fp16"
+             ))] {
+                 $aarch64
+@@ -43,6 +44,7 @@ macro_rules! convert_fn {
+             else if #[cfg(all(
+                 feature = "std",
+                 target_arch = "aarch64",
++                target_endian = "little",
+             ))] {
+                 use std::arch::is_aarch64_feature_detected;
+                 if is_aarch64_feature_detected!("fp16") {
+@@ -299,6 +301,7 @@ macro_rules! math_fn {
+             // Use intrinsics directly when a compile target or using no_std
+             if #[cfg(all(
+                 target_arch = "aarch64",
++                target_endian = "little",
+                 target_feature = "fp16"
+             ))] {
+                 $aarch64
+@@ -308,6 +311,7 @@ macro_rules! math_fn {
+             else if #[cfg(all(
+                 feature = "std",
+                 target_arch = "aarch64",
++                target_endian = "little",
+                 not(target_feature = "fp16")
+             ))] {
+                 use std::arch::is_aarch64_feature_detected;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs   Sat Feb 15 23:41:45 2025
@@ -0,0 +1,51 @@
+$NetBSD: patch-.._vendor_matrixmultiply-0.3.8_src_dgemm__kernel.rs,v 1.1 2025/02/15 23:41:45 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/matrixmultiply-0.3.8/src/dgemm_kernel.rs.orig    2024-10-02 14:26:39.390937007 +0000
++++ ../vendor/matrixmultiply-0.3.8/src/dgemm_kernel.rs
+@@ -28,7 +28,7 @@ struct KernelFma;
+ #[cfg(any(target_arch="x86", target_arch="x86_64"))]
+ struct KernelSse2;
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ struct KernelNeon;
+ 
+@@ -58,7 +58,7 @@ pub(crate) fn detect<G>(selector: G) whe
+         }
+     }
+ 
+-    #[cfg(target_arch="aarch64")]
++    #[cfg(all(target_arch="aarch64", target_endian = "little"))]
+     #[cfg(has_aarch64_simd)]
+     {
+         if is_aarch64_feature_detected_!("neon") {
+@@ -232,7 +232,7 @@ impl GemmKernel for KernelSse2 {
+     }
+ }
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ impl GemmKernel for KernelNeon {
+     type Elem = T;
+@@ -880,7 +880,7 @@ unsafe fn kernel_x86_avx<MA>(k: usize, a
+     }
+ }
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ #[target_feature(enable="neon")]
+ unsafe fn kernel_target_neon(k: usize, alpha: T, a: *const T, b: *const T,
+@@ -1065,7 +1065,7 @@ mod tests {
+         }
+     }
+ 
+-    #[cfg(any(target_arch="aarch64"))]
++    #[cfg(any(all(target_arch="aarch64", target_endian = "little")))]
+     #[cfg(has_aarch64_simd)]
+     mod test_kernel_aarch64 {
+         use super::test_a_kernel;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs   Sat Feb 15 23:41:45 2025
@@ -0,0 +1,51 @@
+$NetBSD: patch-.._vendor_matrixmultiply-0.3.8_src_sgemm__kernel.rs,v 1.1 2025/02/15 23:41:45 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/matrixmultiply-0.3.8/src/sgemm_kernel.rs.orig    2024-10-02 14:29:39.902774317 +0000
++++ ../vendor/matrixmultiply-0.3.8/src/sgemm_kernel.rs
+@@ -27,7 +27,7 @@ struct KernelFma;
+ #[cfg(any(target_arch="x86", target_arch="x86_64"))]
+ struct KernelSse2;
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ struct KernelNeon;
+ struct KernelFallback;
+@@ -55,7 +55,7 @@ pub(crate) fn detect<G>(selector: G) whe
+             return selector.select(KernelSse2);
+         }
+     }
+-    #[cfg(target_arch="aarch64")]
++    #[cfg(all(target_arch="aarch64", target_endian = "little"))]
+     #[cfg(has_aarch64_simd)]
+     {
+         if is_aarch64_feature_detected_!("neon") {
+@@ -215,7 +215,7 @@ impl GemmKernel for KernelSse2 {
+ }
+ 
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ impl GemmKernel for KernelNeon {
+     type Elem = T;
+@@ -561,7 +561,7 @@ unsafe fn kernel_x86_avx<MA>(k: usize, a
+     }
+ }
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ #[target_feature(enable="neon")]
+ unsafe fn kernel_target_neon(k: usize, alpha: T, a: *const T, b: *const T,
+@@ -750,7 +750,7 @@ mod tests {
+         }
+     }
+ 
+-    #[cfg(any(target_arch="aarch64"))]
++    #[cfg(all(target_arch="aarch64", target_endian = "little"))]
+     #[cfg(has_aarch64_simd)]
+     mod test_kernel_aarch64 {
+         use super::test_a_kernel;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs   Sat Feb 15 23:41:46 2025
@@ -0,0 +1,51 @@
+$NetBSD: patch-.._vendor_matrixmultiply-0.3.9_src_dgemm__kernel.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/matrixmultiply-0.3.9/src/dgemm_kernel.rs.orig    2025-02-15 22:01:28.532125465 +0000
++++ ../vendor/matrixmultiply-0.3.9/src/dgemm_kernel.rs
+@@ -28,7 +28,7 @@ struct KernelFma;
+ #[cfg(any(target_arch="x86", target_arch="x86_64"))]
+ struct KernelSse2;
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ struct KernelNeon;
+ 
+@@ -58,7 +58,7 @@ pub(crate) fn detect<G>(selector: G) whe
+         }
+     }
+ 
+-    #[cfg(target_arch="aarch64")]
++    #[cfg(all(target_arch="aarch64", target_endian = "little"))]
+     #[cfg(has_aarch64_simd)]
+     {
+         if is_aarch64_feature_detected_!("neon") {
+@@ -232,7 +232,7 @@ impl GemmKernel for KernelSse2 {
+     }
+ }
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ impl GemmKernel for KernelNeon {
+     type Elem = T;
+@@ -880,7 +880,7 @@ unsafe fn kernel_x86_avx<MA>(k: usize, a
+     }
+ }
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ #[target_feature(enable="neon")]
+ unsafe fn kernel_target_neon(k: usize, alpha: T, a: *const T, b: *const T,
+@@ -1065,7 +1065,7 @@ mod tests {
+         }
+     }
+ 
+-    #[cfg(any(target_arch="aarch64"))]
++    #[cfg(any(all(target_arch="aarch64", target_endian = "little")))]
+     #[cfg(has_aarch64_simd)]
+     mod test_kernel_aarch64 {
+         use super::test_a_kernel;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs   Sat Feb 15 23:41:46 2025
@@ -0,0 +1,51 @@
+$NetBSD: patch-.._vendor_matrixmultiply-0.3.9_src_sgemm__kernel.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/matrixmultiply-0.3.9/src/sgemm_kernel.rs.orig    2025-02-15 22:03:52.950826542 +0000
++++ ../vendor/matrixmultiply-0.3.9/src/sgemm_kernel.rs
+@@ -27,7 +27,7 @@ struct KernelFma;
+ #[cfg(any(target_arch="x86", target_arch="x86_64"))]
+ struct KernelSse2;
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ struct KernelNeon;
+ struct KernelFallback;
+@@ -55,7 +55,7 @@ pub(crate) fn detect<G>(selector: G) whe
+             return selector.select(KernelSse2);
+         }
+     }
+-    #[cfg(target_arch="aarch64")]
++    #[cfg(all(target_arch="aarch64", target_endian = "little"))]
+     #[cfg(has_aarch64_simd)]
+     {
+         if is_aarch64_feature_detected_!("neon") {
+@@ -215,7 +215,7 @@ impl GemmKernel for KernelSse2 {
+ }
+ 
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ impl GemmKernel for KernelNeon {
+     type Elem = T;
+@@ -561,7 +561,7 @@ unsafe fn kernel_x86_avx<MA>(k: usize, a
+     }
+ }
+ 
+-#[cfg(target_arch="aarch64")]
++#[cfg(all(target_arch="aarch64", target_endian = "little"))]
+ #[cfg(has_aarch64_simd)]
+ #[target_feature(enable="neon")]
+ unsafe fn kernel_target_neon(k: usize, alpha: T, a: *const T, b: *const T,
+@@ -750,7 +750,7 @@ mod tests {
+         }
+     }
+ 
+-    #[cfg(any(target_arch="aarch64"))]
++    #[cfg(any(all(target_arch="aarch64", target_endian = "little")))]
+     #[cfg(has_aarch64_simd)]
+     mod test_kernel_aarch64 {
+         use super::test_a_kernel;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs     Sat Feb 15 23:41:46 2025
@@ -0,0 +1,24 @@
+$NetBSD: patch-.._vendor_memchr-2.7.1_src_arch_aarch64_memchr.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.1/src/arch/aarch64/memchr.rs.orig     2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/memchr-2.7.1/src/arch/aarch64/memchr.rs
+@@ -8,7 +8,7 @@ available for `aarch64` targets.)
+ 
+ macro_rules! defraw {
+     ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{
+-        #[cfg(target_feature = "neon")]
++        #[cfg(all(target_feature = "neon", target_endian = "little"))]
+         {
+             use crate::arch::aarch64::neon::memchr::$ty;
+ 
+@@ -19,7 +19,7 @@ macro_rules! defraw {
+             // enabled.
+             $ty::new_unchecked($($needles),+).$find($start, $end)
+         }
+-        #[cfg(not(target_feature = "neon"))]
++        #[cfg(not(all(target_feature = "neon", target_endian = "little")))]
+         {
+             use crate::arch::all::memchr::$ty;
+ 
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs        Sat Feb 15 23:41:46 2025
@@ -0,0 +1,15 @@
+$NetBSD: patch-.._vendor_memchr-2.7.1_src_arch_aarch64_mod.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.1/src/arch/aarch64/mod.rs.orig        2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/memchr-2.7.1/src/arch/aarch64/mod.rs
+@@ -2,6 +2,8 @@
+ Vector algorithms for the `aarch64` target.
+ */
+ 
++#[cfg(target_endian = "little")]
+ pub mod neon;
+ 
++#[cfg(target_endian = "little")]
+ pub(crate) mod memchr;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memchr.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memchr.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memchr.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,128 @@
+$NetBSD: patch-.._vendor_memchr-2.7.1_src_memchr.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.1/src/memchr.rs.orig  2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/memchr-2.7.1/src/memchr.rs
+@@ -518,14 +518,14 @@ unsafe fn memchr_raw(
+     {
+         crate::arch::wasm32::memchr::memchr_raw(needle, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memchr_raw(needle, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::One::new(needle).find_raw(start, end)
+@@ -551,14 +551,14 @@ unsafe fn memrchr_raw(
+     {
+         crate::arch::wasm32::memchr::memrchr_raw(needle, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memrchr_raw(needle, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::One::new(needle).rfind_raw(start, end)
+@@ -585,14 +585,14 @@ unsafe fn memchr2_raw(
+     {
+         crate::arch::wasm32::memchr::memchr2_raw(needle1, needle2, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memchr2_raw(needle1, needle2, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Two::new(needle1, needle2)
+@@ -620,7 +620,7 @@ unsafe fn memrchr2_raw(
+     {
+         crate::arch::wasm32::memchr::memrchr2_raw(needle1, needle2, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memrchr2_raw(
+             needle1, needle2, start, end,
+@@ -629,7 +629,7 @@ unsafe fn memrchr2_raw(
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Two::new(needle1, needle2)
+@@ -662,7 +662,7 @@ unsafe fn memchr3_raw(
+             needle1, needle2, needle3, start, end,
+         )
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memchr3_raw(
+             needle1, needle2, needle3, start, end,
+@@ -671,7 +671,7 @@ unsafe fn memchr3_raw(
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Three::new(needle1, needle2, needle3)
+@@ -704,7 +704,7 @@ unsafe fn memrchr3_raw(
+             needle1, needle2, needle3, start, end,
+         )
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memrchr3_raw(
+             needle1, needle2, needle3, start, end,
+@@ -713,7 +713,7 @@ unsafe fn memrchr3_raw(
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Three::new(needle1, needle2, needle3)
+@@ -736,14 +736,14 @@ unsafe fn count_raw(needle: u8, start: *
+     {
+         crate::arch::wasm32::memchr::count_raw(needle, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::count_raw(needle, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         target_arch = "wasm32",
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::One::new(needle).count_raw(start, end)
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs Sat Feb 15 23:41:46 2025
@@ -0,0 +1,78 @@
+$NetBSD: patch-.._vendor_memchr-2.7.1_src_memmem_searcher.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.1/src/memmem/searcher.rs.orig 2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/memchr-2.7.1/src/memmem/searcher.rs
+@@ -3,7 +3,7 @@ use crate::arch::all::{
+     rabinkarp, twoway,
+ };
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ use crate::arch::aarch64::neon::packedpair as neon;
+ #[cfg(target_arch = "wasm32")]
+ use crate::arch::wasm32::simd128::packedpair as simd128;
+@@ -129,7 +129,7 @@ impl Searcher {
+                 Searcher::twoway(needle, rabinkarp, prestrat)
+             }
+         }
+-        #[cfg(target_arch = "aarch64")]
++        #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+         {
+             if let Some(pp) = neon::Finder::with_pair(needle, pair) {
+                 if do_packed_search(needle) {
+@@ -152,7 +152,7 @@ impl Searcher {
+         #[cfg(not(any(
+             all(target_arch = "x86_64", target_feature = "sse2"),
+             target_arch = "wasm32",
+-            target_arch = "aarch64"
++            all(target_arch = "aarch64", target_endian = "little")
+         )))]
+         {
+             if prefilter.is_none() {
+@@ -253,7 +253,7 @@ union SearcherKind {
+     avx2: crate::arch::x86_64::avx2::packedpair::Finder,
+     #[cfg(target_arch = "wasm32")]
+     simd128: crate::arch::wasm32::simd128::packedpair::Finder,
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     neon: crate::arch::aarch64::neon::packedpair::Finder,
+ }
+ 
+@@ -421,7 +421,7 @@ unsafe fn searcher_kind_simd128(
+ /// # Safety
+ ///
+ /// Callers must ensure that the `searcher.kind.neon` union field is set.
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ unsafe fn searcher_kind_neon(
+     searcher: &Searcher,
+     _prestate: &mut PrefilterState,
+@@ -686,7 +686,7 @@ impl Prefilter {
+     }
+ 
+     /// Return a prefilter using a aarch64 neon vector algorithm.
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     #[inline]
+     fn neon(finder: neon::Finder, needle: &[u8]) -> Prefilter {
+         trace!("building aarch64 neon prefilter");
+@@ -763,7 +763,7 @@ union PrefilterKind {
+     avx2: crate::arch::x86_64::avx2::packedpair::Finder,
+     #[cfg(target_arch = "wasm32")]
+     simd128: crate::arch::wasm32::simd128::packedpair::Finder,
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     neon: crate::arch::aarch64::neon::packedpair::Finder,
+ }
+ 
+@@ -852,7 +852,7 @@ unsafe fn prefilter_kind_simd128(
+ /// # Safety
+ ///
+ /// Callers must ensure that the `strat.kind.neon` union field is set.
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ unsafe fn prefilter_kind_neon(
+     strat: &Prefilter,
+     haystack: &[u8],
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_vector.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_vector.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.1_src_vector.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,15 @@
+$NetBSD: patch-.._vendor_memchr-2.7.1_src_vector.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.1/src/vector.rs.orig  2006-07-24 01:21:28.000000000 +0000
++++ ../vendor/memchr-2.7.1/src/vector.rs
+@@ -293,7 +293,7 @@ mod x86avx2 {
+     }
+ }
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ mod aarch64neon {
+     use core::arch::aarch64::*;
+ 
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs     Sat Feb 15 23:41:46 2025
@@ -0,0 +1,24 @@
+$NetBSD: patch-.._vendor_memchr-2.7.4_src_arch_aarch64_memchr.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.4/src/arch/aarch64/memchr.rs.orig     2025-02-15 20:48:25.871984028 +0000
++++ ../vendor/memchr-2.7.4/src/arch/aarch64/memchr.rs
+@@ -8,7 +8,7 @@ available for `aarch64` targets.)
+ 
+ macro_rules! defraw {
+     ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{
+-        #[cfg(target_feature = "neon")]
++        #[cfg(all(target_feature = "neon", target_endian = "little"))]
+         {
+             use crate::arch::aarch64::neon::memchr::$ty;
+ 
+@@ -19,7 +19,7 @@ macro_rules! defraw {
+             // enabled.
+             $ty::new_unchecked($($needles),+).$find($start, $end)
+         }
+-        #[cfg(not(target_feature = "neon"))]
++        #[cfg(not(all(target_feature = "neon", target_endian = "little")))]
+         {
+             use crate::arch::all::memchr::$ty;
+ 
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs        Sat Feb 15 23:41:46 2025
@@ -0,0 +1,15 @@
+$NetBSD: patch-.._vendor_memchr-2.7.4_src_arch_aarch64_mod.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.4/src/arch/aarch64/mod.rs.orig        2025-02-15 20:46:55.895217561 +0000
++++ ../vendor/memchr-2.7.4/src/arch/aarch64/mod.rs
+@@ -2,6 +2,8 @@
+ Vector algorithms for the `aarch64` target.
+ */
+ 
++#[cfg(target_endian = "little")]
+ pub mod neon;
+ 
++#[cfg(target_endian = "little")]
+ pub(crate) mod memchr;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memchr.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memchr.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memchr.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,128 @@
+$NetBSD: patch-.._vendor_memchr-2.7.4_src_memchr.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.4/src/memchr.rs.orig  2025-02-15 20:50:05.328306921 +0000
++++ ../vendor/memchr-2.7.4/src/memchr.rs
+@@ -518,14 +518,14 @@ unsafe fn memchr_raw(
+     {
+         crate::arch::wasm32::memchr::memchr_raw(needle, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memchr_raw(needle, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::One::new(needle).find_raw(start, end)
+@@ -551,14 +551,14 @@ unsafe fn memrchr_raw(
+     {
+         crate::arch::wasm32::memchr::memrchr_raw(needle, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memrchr_raw(needle, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::One::new(needle).rfind_raw(start, end)
+@@ -585,14 +585,14 @@ unsafe fn memchr2_raw(
+     {
+         crate::arch::wasm32::memchr::memchr2_raw(needle1, needle2, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memchr2_raw(needle1, needle2, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Two::new(needle1, needle2)
+@@ -620,7 +620,7 @@ unsafe fn memrchr2_raw(
+     {
+         crate::arch::wasm32::memchr::memrchr2_raw(needle1, needle2, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memrchr2_raw(
+             needle1, needle2, start, end,
+@@ -629,7 +629,7 @@ unsafe fn memrchr2_raw(
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Two::new(needle1, needle2)
+@@ -662,7 +662,7 @@ unsafe fn memchr3_raw(
+             needle1, needle2, needle3, start, end,
+         )
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memchr3_raw(
+             needle1, needle2, needle3, start, end,
+@@ -671,7 +671,7 @@ unsafe fn memchr3_raw(
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Three::new(needle1, needle2, needle3)
+@@ -704,7 +704,7 @@ unsafe fn memrchr3_raw(
+             needle1, needle2, needle3, start, end,
+         )
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::memrchr3_raw(
+             needle1, needle2, needle3, start, end,
+@@ -713,7 +713,7 @@ unsafe fn memrchr3_raw(
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::Three::new(needle1, needle2, needle3)
+@@ -736,14 +736,14 @@ unsafe fn count_raw(needle: u8, start: *
+     {
+         crate::arch::wasm32::memchr::count_raw(needle, start, end)
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     {
+         crate::arch::aarch64::memchr::count_raw(needle, start, end)
+     }
+     #[cfg(not(any(
+         target_arch = "x86_64",
+         all(target_arch = "wasm32", target_feature = "simd128"),
+-        target_arch = "aarch64"
++        all(target_arch = "aarch64", target_endian = "little")
+     )))]
+     {
+         crate::arch::all::memchr::One::new(needle).count_raw(start, end)
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs Sat Feb 15 23:41:46 2025
@@ -0,0 +1,78 @@
+$NetBSD: patch-.._vendor_memchr-2.7.4_src_memmem_searcher.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.4/src/memmem/searcher.rs.orig 2025-02-15 20:54:13.327453262 +0000
++++ ../vendor/memchr-2.7.4/src/memmem/searcher.rs
+@@ -3,7 +3,7 @@ use crate::arch::all::{
+     rabinkarp, twoway,
+ };
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ use crate::arch::aarch64::neon::packedpair as neon;
+ #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
+ use crate::arch::wasm32::simd128::packedpair as simd128;
+@@ -129,7 +129,7 @@ impl Searcher {
+                 Searcher::twoway(needle, rabinkarp, prestrat)
+             }
+         }
+-        #[cfg(target_arch = "aarch64")]
++        #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+         {
+             if let Some(pp) = neon::Finder::with_pair(needle, pair) {
+                 if do_packed_search(needle) {
+@@ -152,7 +152,7 @@ impl Searcher {
+         #[cfg(not(any(
+             all(target_arch = "x86_64", target_feature = "sse2"),
+             all(target_arch = "wasm32", target_feature = "simd128"),
+-            target_arch = "aarch64"
++            all(target_arch = "aarch64", target_endian = "little")
+         )))]
+         {
+             if prefilter.is_none() {
+@@ -253,7 +253,7 @@ union SearcherKind {
+     avx2: crate::arch::x86_64::avx2::packedpair::Finder,
+     #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
+     simd128: crate::arch::wasm32::simd128::packedpair::Finder,
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     neon: crate::arch::aarch64::neon::packedpair::Finder,
+ }
+ 
+@@ -421,7 +421,7 @@ unsafe fn searcher_kind_simd128(
+ /// # Safety
+ ///
+ /// Callers must ensure that the `searcher.kind.neon` union field is set.
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ unsafe fn searcher_kind_neon(
+     searcher: &Searcher,
+     _prestate: &mut PrefilterState,
+@@ -686,7 +686,7 @@ impl Prefilter {
+     }
+ 
+     /// Return a prefilter using a aarch64 neon vector algorithm.
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     #[inline]
+     fn neon(finder: neon::Finder, needle: &[u8]) -> Prefilter {
+         trace!("building aarch64 neon prefilter");
+@@ -763,7 +763,7 @@ union PrefilterKind {
+     avx2: crate::arch::x86_64::avx2::packedpair::Finder,
+     #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
+     simd128: crate::arch::wasm32::simd128::packedpair::Finder,
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     neon: crate::arch::aarch64::neon::packedpair::Finder,
+ }
+ 
+@@ -852,7 +852,7 @@ unsafe fn prefilter_kind_simd128(
+ /// # Safety
+ ///
+ /// Callers must ensure that the `strat.kind.neon` union field is set.
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ unsafe fn prefilter_kind_neon(
+     strat: &Prefilter,
+     haystack: &[u8],
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_vector.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_vector.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_memchr-2.7.4_src_vector.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,15 @@
+$NetBSD: patch-.._vendor_memchr-2.7.4_src_vector.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/memchr-2.7.4/src/vector.rs.orig  2025-02-15 20:56:52.793849071 +0000
++++ ../vendor/memchr-2.7.4/src/vector.rs
+@@ -289,7 +289,7 @@ mod x86avx2 {
+     }
+ }
+ 
+-#[cfg(target_arch = "aarch64")]
++#[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ mod aarch64neon {
+     use core::arch::aarch64::*;
+ 
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f32x4__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f32x4__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f32x4__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,249 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_f32x4__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.15/src/f32x4_.rs.orig   2024-10-02 14:01:57.610101164 +0000
++++ ../vendor/wide-0.7.15/src/f32x4_.rs
+@@ -23,7 +23,7 @@ pick! {
+         u32x4_all_true(f32x4_eq(self.simd, other.simd))
+       }
+     }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))] {
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -97,7 +97,7 @@ impl Add for f32x4 {
+         Self { sse: add_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_f32(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -121,7 +121,7 @@ impl Sub for f32x4 {
+         Self { sse: sub_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -145,7 +145,7 @@ impl Mul for f32x4 {
+         Self { sse: mul_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmulq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -169,7 +169,7 @@ impl Div for f32x4 {
+         Self { sse: div_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_div(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vdivq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -265,7 +265,7 @@ impl BitAnd for f32x4 {
+         Self { sse: bitand_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(self.neon), vreinterpretq_u32_f32(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -289,7 +289,7 @@ impl BitOr for f32x4 {
+         Self { sse: bitor_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(self.neon), vreinterpretq_u32_f32(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -313,7 +313,7 @@ impl BitXor for f32x4 {
+         Self { sse: bitxor_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(self.neon), vreinterpretq_u32_f32(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -337,7 +337,7 @@ impl CmpEq for f32x4 {
+         Self { sse: cmp_eq_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vceqq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -361,7 +361,7 @@ impl CmpGe for f32x4 {
+         Self { sse: cmp_ge_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_ge(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcgeq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -385,7 +385,7 @@ impl CmpGt for f32x4 {
+         Self { sse: cmp_gt_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcgtq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -409,7 +409,7 @@ impl CmpNe for f32x4 {
+         Self { sse: cmp_neq_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_ne(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(self.neon, rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -433,7 +433,7 @@ impl CmpLe for f32x4 {
+         Self { sse: cmp_le_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_le(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcleq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -457,7 +457,7 @@ impl CmpLt for f32x4 {
+         Self { sse: cmp_lt_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcltq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -497,7 +497,7 @@ impl f32x4 {
+     pick! {
+       if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vabsq_f32(self.neon) }}
+       } else {
+         let non_sign_bits = f32x4::from(f32::from_bits(i32::MAX as u32));
+@@ -519,7 +519,7 @@ impl f32x4 {
+         Self {
+           simd: f32x4_pmax(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -558,7 +558,7 @@ impl f32x4 {
+             f32x4_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxnmq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -584,7 +584,7 @@ impl f32x4 {
+         Self {
+           simd: f32x4_pmin(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -623,7 +623,7 @@ impl f32x4 {
+             f32x4_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminnmq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -643,7 +643,7 @@ impl f32x4 {
+         Self { sse: cmp_unord_mask_m128(self.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_ne(self.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(self.neon, self.neon))) }}
+       } else {
+         Self { arr: [
+@@ -688,7 +688,7 @@ impl f32x4 {
+         mask.blend(self, f)
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_nearest(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vrndnq_f32(self.neon) }}
+       } else {
+         // Note(Lokathor): This software fallback is probably very slow compared
+@@ -752,7 +752,7 @@ impl f32x4 {
+         flip_to_max ^ cast
+       } else if #[cfg(target_feature="simd128")] {
+         cast(Self { simd: i32x4_trunc_sat_f32x4(f32x4_nearest(self.simd)) })
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         cast(unsafe {Self { neon: vreinterpretq_f32_s32(vcvtnq_s32_f32(self.neon)) }})
+       } else {
+         let rounded: [f32; 4] = cast(self.round());
+@@ -797,7 +797,7 @@ impl f32x4 {
+         flip_to_max ^ cast
+       } else if #[cfg(target_feature="simd128")] {
+         cast(Self { simd: i32x4_trunc_sat_f32x4(self.simd) })
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         cast(unsafe {Self { neon: vreinterpretq_f32_s32(vcvtq_s32_f32(self.neon)) }})
+       } else {
+         let n: [f32;4] = cast(self);
+@@ -1170,7 +1170,7 @@ impl f32x4 {
+         Self { sse: reciprocal_m128(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_div(f32x4_splat(1.0), self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vdivq_f32(vdupq_n_f32(1.0), self.neon) }}
+       } else {
+         Self { arr: [
+@@ -1190,7 +1190,7 @@ impl f32x4 {
+         Self { sse: reciprocal_sqrt_m128(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_div(f32x4_splat(1.0), f32x4_sqrt(self.simd)) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vdivq_f32(vdupq_n_f32(1.0), vsqrtq_f32(self.neon)) }}
+       } else if #[cfg(feature="std")] {
+         Self { arr: [
+@@ -1217,7 +1217,7 @@ impl f32x4 {
+         Self { sse: sqrt_m128(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_sqrt(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsqrtq_f32(self.neon) }}
+       } else if #[cfg(feature="std")] {
+         Self { arr: [
+@@ -1245,7 +1245,7 @@ impl f32x4 {
+         move_mask_m128(self.sse)
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
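
The hunks above show the whole shape of this change: each NEON arm of wide's pick! dispatch gains a target_endian = "little" condition, so a big-endian aarch64 (aarch64_eb) build falls through to the portable array fallback instead of the NEON intrinsics. A minimal sketch of the same gating written with plain #[cfg] attributes rather than wide's pick! macro — the function name is illustrative and not taken from the patched sources:

    // Sketch only: gate the NEON path to little-endian aarch64,
    // exactly as the patches do, and use scalar code everywhere else
    // (including aarch64_eb).
    #[cfg(all(target_feature = "neon", target_arch = "aarch64", target_endian = "little"))]
    fn add_f32x4(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
        use core::arch::aarch64::*;
        unsafe {
            // Load both arrays, add lane-wise, store the result back out.
            let r = vaddq_f32(vld1q_f32(a.as_ptr()), vld1q_f32(b.as_ptr()));
            let mut out = [0.0f32; 4];
            vst1q_f32(out.as_mut_ptr(), r);
            out
        }
    }

    #[cfg(not(all(target_feature = "neon", target_arch = "aarch64", target_endian = "little")))]
    fn add_f32x4(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
        // Portable fallback, analogous to the arr: [...] branches above.
        [a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]]
    }

    fn main() {
        let s = add_f32x4([1.0, 2.0, 3.0, 4.0], [10.0, 20.0, 30.0, 40.0]);
        assert_eq!(s, [11.0, 22.0, 33.0, 44.0]);
    }
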
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f64x2__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f64x2__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_f64x2__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,222 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_f64x2__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.15/src/f64x2_.rs.orig   2024-10-02 14:04:14.119046164 +0000
++++ ../vendor/wide-0.7.15/src/f64x2_.rs
+@@ -23,7 +23,7 @@ pick! {
+         u64x2_all_true(f64x2_eq(self.simd, other.simd))
+       }
+     }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -100,7 +100,7 @@ impl Add for f64x2 {
+         Self { sse: add_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_f64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -122,7 +122,7 @@ impl Sub for f64x2 {
+         Self { sse: sub_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vsubq_f64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -144,7 +144,7 @@ impl Mul for f64x2 {
+         Self { sse: mul_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmulq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -166,7 +166,7 @@ impl Div for f64x2 {
+         Self { sse: div_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_div(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vdivq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -260,7 +260,7 @@ impl BitAnd for f64x2 {
+         Self { sse: bitand_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -282,7 +282,7 @@ impl BitOr for f64x2 {
+         Self { sse: bitor_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -304,7 +304,7 @@ impl BitXor for f64x2 {
+         Self { sse: bitxor_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -326,7 +326,7 @@ impl CmpEq for f64x2 {
+         Self { sse: cmp_eq_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -348,7 +348,7 @@ impl CmpGe for f64x2 {
+         Self { sse: cmp_ge_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_ge(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcgeq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -372,7 +372,7 @@ impl CmpGt for f64x2 {
+         Self { sse: cmp_gt_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcgtq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -394,7 +394,7 @@ impl CmpNe for f64x2 {
+         Self { sse: cmp_neq_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_ne(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, rhs.neon)) }.not() }
+       } else {
+         Self { arr: [
+@@ -416,7 +416,7 @@ impl CmpLe for f64x2 {
+         Self { sse: cmp_le_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_le(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcleq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -438,7 +438,7 @@ impl CmpLt for f64x2 {
+         Self { sse: cmp_lt_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcltq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -475,7 +475,7 @@ impl f64x2 {
+     pick! {
+       if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vabsq_f64(self.neon) }}
+       } else {
+         let non_sign_bits = f64x2::from(f64::from_bits(i64::MAX as u64));
+@@ -497,7 +497,7 @@ impl f64x2 {
+         Self {
+           simd: f64x2_pmax(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -534,7 +534,7 @@ impl f64x2 {
+             f64x2_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxnmq_f64(self.neon, rhs.neon) }}
+             } else {
+         Self { arr: [
+@@ -558,7 +558,7 @@ impl f64x2 {
+         Self {
+           simd: f64x2_pmin(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -595,7 +595,7 @@ impl f64x2 {
+             f64x2_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminnmq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -614,7 +614,7 @@ impl f64x2 {
+         Self { sse: cmp_unord_mask_m128d(self.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_ne(self.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, self.neon)) }.not() }
+       } else {
+         Self { arr: [
+@@ -1229,7 +1229,7 @@ impl f64x2 {
+         Self { sse: sqrt_m128d(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_sqrt(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsqrtq_f64(self.neon) }}
+       } else if #[cfg(feature="std")] {
+         Self { arr: [
+@@ -1252,7 +1252,7 @@ impl f64x2 {
+         move_mask_m128d(self.sse)
+       } else if #[cfg(target_feature="simd128")] {
+         u64x2_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe
+         {
+           let e = vreinterpretq_u64_f64(self.neon);
+@@ -1398,7 +1398,7 @@ impl f64x2 {
+       } else if #[cfg(any(target_feature="sse2", target_feature="simd128"))] {
+         let a: [f64;2] = cast(self);
+         a.iter().sum()
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { vgetq_lane_f64(self.neon,0) + vgetq_lane_f64(self.neon,1) }
+       } else {
+         self.arr.iter().sum()
+@@ -1612,7 +1612,7 @@ impl Not for f64x2 {
+         Self { sse: self.sse.not() }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_not(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u32(vmvnq_u32(vreinterpretq_u32_f64(self.neon))) }}
+       } else {
+         Self { arr: [
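
Whether a particular build actually compiled the NEON branch in can be confirmed from Rust itself with the cfg! macro, which evaluates the same predicate the patches add. A small illustrative sketch (the helper name is made up for this example and does not exist in the patched crates):

    // Illustrative only: report which branch this build compiled in.
    fn neon_path_enabled() -> bool {
        cfg!(all(
            target_feature = "neon",
            target_arch = "aarch64",
            target_endian = "little"
        ))
    }

    fn main() {
        // Expected to print "false" on aarch64_eb, where the patched
        // crates now take their scalar fallbacks.
        println!("NEON path compiled in: {}", neon_path_enabled());
    }
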
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i16x8__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i16x8__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i16x8__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,231 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_i16x8__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/i16x8_.rs.orig   2024-10-02 14:06:08.049939294 +0000
++++ ../vendor/wide-0.7.15/src/i16x8_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i16x8 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for i16x8 {
+         Self { sse: add_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_s16(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -98,7 +98,7 @@ impl Sub for i16x8 {
+         Self { sse: sub_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -126,7 +126,7 @@ impl Mul for i16x8 {
+         Self { sse: mul_i16_keep_low_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmulq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -208,7 +208,7 @@ impl BitAnd for i16x8 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -236,7 +236,7 @@ impl BitOr for i16x8 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -264,7 +264,7 @@ impl BitXor for i16x8 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -296,7 +296,7 @@ macro_rules! impl_shl_t_for_i16x8 {
+             Self { sse: shl_all_u16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i16x8_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_s16(self.neon, vmovq_n_s16(rhs as i16)) }}
+           } else {
+             let u = rhs as u64;
+@@ -332,7 +332,7 @@ macro_rules! impl_shr_t_for_i16x8 {
+             Self { sse: shr_all_i16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i16x8_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_s16(self.neon, vmovq_n_s16( -(rhs as i16))) }}
+           } else {
+             let u = rhs as u64;
+@@ -364,7 +364,7 @@ impl CmpEq for i16x8 {
+         Self { sse: cmp_eq_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s16_u16(vceqq_s16(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -392,7 +392,7 @@ impl CmpGt for i16x8 {
+         Self { sse: cmp_gt_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s16_u16(vcgtq_s16(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -420,7 +420,7 @@ impl CmpLt for i16x8 {
+         Self { sse: cmp_lt_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s16_u16(vcltq_s16(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -453,7 +453,7 @@ impl i16x8 {
+         move_mask_i8_m128i( pack_i16_to_i8_m128i(self.sse,self.sse)) & 0xff
+       } else if #[cfg(target_feature="simd128")] {
+         i16x8_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
+@@ -550,7 +550,7 @@ impl i16x8 {
+         use core::arch::wasm32::*;
+ 
+         i16x8 { simd: i16x8_narrow_i32x4(v.a.simd, v.b.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))] {
+         use core::arch::aarch64::*;
+ 
+         unsafe {
+@@ -620,7 +620,7 @@ impl i16x8 {
+         unsafe { Self { sse: load_unaligned_m128i( &*(input.as_ptr() as * const [u8;16]) ) } }
+       } else if #[cfg(target_feature="simd128")] {
+         unsafe { Self { simd: v128_load(input.as_ptr() as *const v128 ) } }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vld1q_s16( input.as_ptr() as *const i16 ) } }
+       } else {
+         // 2018 edition doesn't have try_into
+@@ -637,7 +637,7 @@ impl i16x8 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_s16(vreinterpretq_u16_s16(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -693,7 +693,7 @@ impl i16x8 {
+         Self { sse: abs_i16_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vabsq_s16(self.neon) }}
+       } else {
+         self.is_negative().blend(self.neg(), self)
+@@ -708,7 +708,7 @@ impl i16x8 {
+         Self { sse: max_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_s16(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(rhs, self)
+@@ -723,7 +723,7 @@ impl i16x8 {
+         Self { sse: min_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_s16(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(self, rhs)
+@@ -739,7 +739,7 @@ impl i16x8 {
+         Self { sse: add_saturating_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vqaddq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -763,7 +763,7 @@ impl i16x8 {
+         Self { sse: sub_saturating_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vqsubq_s16(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -792,7 +792,7 @@ impl i16x8 {
+         i32x4 { sse:  mul_i16_horizontal_add_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         i32x4 { simd: i32x4_dot_i16x8(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {
+           let pl = vmull_s16(vget_low_s16(self.neon),  vget_low_s16(rhs.neon));
+           let ph = vmull_high_s16(self.neon, rhs.neon);
+@@ -835,7 +835,7 @@ impl i16x8 {
+         Self { sse: s }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_q15mulr_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vqrdmulhq_s16(self.neon, rhs.neon) } }
+       } else {
+         // compiler does a surprisingly good job of vectorizing this
+@@ -887,7 +887,7 @@ impl i16x8 {
+           i16x8 { sse: unpack_low_i64_m128i(b4, b8) },
+           i16x8 { sse: unpack_high_i64_m128i(b4, b8) } ,
+         ]
+-     } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++     } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+ 
+           #[inline] fn vtrq32(a : int16x8_t, b : int16x8_t) -> (int16x8_t, int16x8_t)
+           {
+@@ -1015,7 +1015,7 @@ impl i16x8 {
+         Self { sse: s }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_q15mulr_sat(self.simd, i16x8_splat(rhs)) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vqrdmulhq_n_s16(self.neon, rhs) } }
+       } else {
+         // compiler does a surprisingly good job of vectorizing this
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i32x4__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i32x4__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i32x4__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,159 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_i32x4__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.15/src/i32x4_.rs.orig   2024-10-02 14:08:01.778610646 +0000
++++ ../vendor/wide-0.7.15/src/i32x4_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i32x4 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for i32x4 {
+         Self { sse: add_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_s32(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for i32x4 {
+         Self { sse: sub_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -118,7 +118,7 @@ impl Mul for i32x4 {
+         Self { sse: mul_32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmulq_s32(self.neon, rhs.neon) }}
+       } else {
+         let arr1: [i32; 4] = cast(self);
+@@ -198,7 +198,7 @@ impl BitAnd for i32x4 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -222,7 +222,7 @@ impl BitOr for i32x4 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -246,7 +246,7 @@ impl BitXor for i32x4 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -274,7 +274,7 @@ macro_rules! impl_shl_t_for_i32x4 {
+             Self { sse: shl_all_u32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i32x4_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_s32(self.neon, vmovq_n_s32(rhs as i32)) }}
+           } else {
+             let u = rhs as u64;
+@@ -306,7 +306,7 @@ macro_rules! impl_shr_t_for_i32x4 {
+             Self { sse: shr_all_i32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i32x4_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_s32(self.neon, vmovq_n_s32( -(rhs as i32))) }}
+           } else {
+             let u = rhs as u64;
+@@ -334,7 +334,7 @@ impl CmpEq for i32x4 {
+         Self { sse: cmp_eq_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s32_u32(vceqq_s32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -358,7 +358,7 @@ impl CmpGt for i32x4 {
+         Self { sse: cmp_gt_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s32_u32(vcgtq_s32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -382,7 +382,7 @@ impl CmpLt for i32x4 {
+         Self { sse: cmp_lt_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s32_u32(vcltq_s32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -410,7 +410,7 @@ impl i32x4 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_s32(vreinterpretq_u32_s32(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -425,7 +425,7 @@ impl i32x4 {
+         Self { sse: abs_i32_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vabsq_s32(self.neon) }}
+       } else {
+         let arr: [i32; 4] = cast(self);
+@@ -460,7 +460,7 @@ impl i32x4 {
+         Self { sse: min_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_s32(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(self, rhs)
+@@ -475,7 +475,7 @@ impl i32x4 {
+         cast(convert_to_m128_from_i32_m128i(self.sse))
+       } else if #[cfg(target_feature="simd128")] {
+         cast(Self { simd: f32x4_convert_i32x4(self.simd) })
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         cast(unsafe {Self { neon: vreinterpretq_s32_f32(vcvtq_f32_s32(self.neon)) }})
+       } else {
+         let arr: [i32; 4] = cast(self);
+@@ -497,7 +497,7 @@ impl i32x4 {
+         move_mask_m128(cast(self.sse))
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i64x2__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i64x2__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i64x2__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,105 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_i64x2__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/i64x2_.rs.orig   2024-10-02 14:09:40.101937836 +0000
++++ ../vendor/wide-0.7.15/src/i64x2_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i64x2 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -72,7 +72,7 @@ impl Add for i64x2 {
+         Self { sse: add_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_s64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for i64x2 {
+         Self { sse: sub_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vsubq_s64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -191,7 +191,7 @@ impl BitAnd for i64x2 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_s64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -213,7 +213,7 @@ impl BitOr for i64x2 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_s64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -235,7 +235,7 @@ impl BitXor for i64x2 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_s64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -261,7 +261,7 @@ macro_rules! impl_shl_t_for_i64x2 {
+             Self { sse: shl_all_u64_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i64x2_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_s64(self.neon, vmovq_n_s64(rhs as i64)) }}
+           } else {
+             let u = rhs as u64;
+@@ -314,7 +314,7 @@ impl CmpEq for i64x2 {
+         Self { sse: cmp_eq_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s64_u64(vceqq_s64(self.neon, rhs.neon)) }}
+       } else {
+         let s: [i64;2] = cast(self);
+@@ -338,7 +338,7 @@ impl CmpGt for i64x2 {
+         Self { sse: cmp_gt_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s64_u64(vcgtq_s64(self.neon, rhs.neon)) }}
+       } else {
+         let s: [i64;2] = cast(self);
+@@ -362,7 +362,7 @@ impl CmpLt for i64x2 {
+         Self { sse: !cmp_gt_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s64_u64(vcltq_s64(self.neon, rhs.neon)) }}
+       } else {
+         let s: [i64;2] = cast(self);
+@@ -390,7 +390,7 @@ impl i64x2 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_s64(vreinterpretq_u64_s64(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i8x16__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i8x16__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_i8x16__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,168 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_i8x16__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/i8x16_.rs.orig   2024-10-02 14:11:05.334444151 +0000
++++ ../vendor/wide-0.7.15/src/i8x16_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i8x16 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for i8x16 {
+         Self { sse: add_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_s8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -106,7 +106,7 @@ impl Sub for i8x16 {
+         Self { sse: sub_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -178,7 +178,7 @@ impl BitAnd for i8x16 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -214,7 +214,7 @@ impl BitOr for i8x16 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -250,7 +250,7 @@ impl BitXor for i8x16 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -286,7 +286,7 @@ impl CmpEq for i8x16 {
+         Self { sse: cmp_eq_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s8_u8(vceqq_s8(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -322,7 +322,7 @@ impl CmpGt for i8x16 {
+         Self { sse: cmp_gt_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s8_u8(vcgtq_s8(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -358,7 +358,7 @@ impl CmpLt for i8x16 {
+         Self { sse: cmp_lt_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vreinterpretq_s8_u8(vcltq_s8(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -400,7 +400,7 @@ impl i8x16 {
+         i8x16 { sse: pack_i16_to_i8_m128i( extract_m128i_from_m256i::<0>(v.avx2), extract_m128i_from_m256i::<1>(v.avx2))  }
+       } else if #[cfg(target_feature="sse2")] {
+         i8x16 { sse: pack_i16_to_i8_m128i( v.a.sse, v.b.sse ) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))] {
+         use core::arch::aarch64::*;
+ 
+         unsafe {
+@@ -487,7 +487,7 @@ impl i8x16 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_s8(vreinterpretq_u8_s8(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -502,7 +502,7 @@ impl i8x16 {
+         Self { sse: abs_i8_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vabsq_s8(self.neon) }}
+       } else {
+         let arr: [i8; 16] = cast(self);
+@@ -535,7 +535,7 @@ impl i8x16 {
+         Self { sse: max_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_s8(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(rhs, self)
+@@ -550,7 +550,7 @@ impl i8x16 {
+         Self { sse: min_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_s8(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(self, rhs)
+@@ -568,7 +568,7 @@ impl i8x16 {
+         unsafe { Self { sse: load_unaligned_m128i( &*(input.as_ptr() as * const [u8;16]) ) } }
+       } else if #[cfg(target_feature="simd128")] {
+         unsafe { Self { simd: v128_load(input.as_ptr() as *const v128 ) } }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vld1q_s8( input.as_ptr() as *const i8 ) } }
+       } else {
+         // 2018 edition doesn't have try_into
+@@ -585,7 +585,7 @@ impl i8x16 {
+         move_mask_i8_m128i(self.sse)
+       } else if #[cfg(target_feature="simd128")] {
+         i8x16_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
+@@ -666,7 +666,7 @@ impl i8x16 {
+         Self { sse: add_saturating_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vqaddq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -698,7 +698,7 @@ impl i8x16 {
+         Self { sse: sub_saturating_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vqsubq_s8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u16x8__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u16x8__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u16x8__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,141 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_u16x8__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/u16x8_.rs.orig   2024-10-02 14:12:38.841269752 +0000
++++ ../vendor/wide-0.7.15/src/u16x8_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u16x8 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+       use core::arch::aarch64::*;
+       #[repr(C)]
+       #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for u16x8 {
+         Self { sse: add_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_u16(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -98,7 +98,7 @@ impl Sub for u16x8 {
+         Self { sse: sub_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -126,7 +126,7 @@ impl Mul for u16x8 {
+         Self { sse: mul_i16_keep_low_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmulq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -208,7 +208,7 @@ impl BitAnd for u16x8 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -236,7 +236,7 @@ impl BitOr for u16x8 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -264,7 +264,7 @@ impl BitXor for u16x8 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -296,7 +296,7 @@ macro_rules! impl_shl_t_for_u16x8 {
+             Self { sse: shl_all_u16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u16x8_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_u16(self.neon, vmovq_n_s16(rhs as i16)) }}
+           } else {
+             let u = rhs as u64;
+@@ -332,7 +332,7 @@ macro_rules! impl_shr_t_for_u16x8 {
+             Self { sse: shr_all_u16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u16x8_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_u16(self.neon, vmovq_n_s16( -(rhs as i16))) }}
+           } else {
+             let u = rhs as u64;
+@@ -368,7 +368,7 @@ impl u16x8 {
+         Self { sse: cmp_eq_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vceqq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -392,7 +392,7 @@ impl u16x8 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_u16(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -407,7 +407,7 @@ impl u16x8 {
+         Self { sse: max_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_u16(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u16; 8] = cast(self);
+@@ -433,7 +433,7 @@ impl u16x8 {
+         Self { sse: min_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_u16(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u16; 8] = cast(self);
+@@ -460,7 +460,7 @@ impl u16x8 {
+         Self { sse: add_saturating_u16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vqaddq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -484,7 +484,7 @@ impl u16x8 {
+         Self { sse: sub_saturating_u16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vqsubq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
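
[The patches above and below all make the same one-line change: every
`#[cfg(all(target_feature="neon",target_arch="aarch64"))]` arm inside the
wide crate's pick! blocks gains a `target_endian = "little"` condition, so a
big-endian aarch64 (aarch64_eb) build falls through to the portable array
fallback instead of the NEON intrinsics.  As a rough, hypothetical sketch of
that gating pattern outside the pick! macro -- illustrative only, not code
taken from the patches -- a stand-alone function could be written as:]

    // NEON path: compiled only on little-endian aarch64 with the
    // "neon" target feature, mirroring the cfg gate the patches add.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon", target_endian = "little"))]
    fn add_u16x8(a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
        use core::arch::aarch64::*;
        unsafe {
            let va = vld1q_u16(a.as_ptr());
            let vb = vld1q_u16(b.as_ptr());
            let mut out = [0u16; 8];
            vst1q_u16(out.as_mut_ptr(), vaddq_u16(va, vb));
            out
        }
    }

    // Portable fallback: used everywhere else, including aarch64_eb,
    // where the endianness condition above fails at compile time.
    #[cfg(not(all(target_arch = "aarch64", target_feature = "neon", target_endian = "little")))]
    fn add_u16x8(a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
        let mut out = [0u16; 8];
        for i in 0..8 {
            out[i] = a[i].wrapping_add(b[i]);
        }
        out
    }

    fn main() {
        let a = [1u16, 2, 3, 4, 5, 6, 7, 8];
        let b = [10u16; 8];
        println!("{:?}", add_u16x8(a, b));
    }

[On aarch64_eb only the scalar version is compiled, which is exactly the
effect the added `target_endian = "little"` condition has on the vendored
crates below.]
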
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u32x4__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u32x4__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u32x4__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,144 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_u32x4__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/u32x4_.rs.orig   2024-10-02 14:14:03.899924604 +0000
++++ ../vendor/wide-0.7.15/src/u32x4_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u32x4 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for u32x4 {
+         Self { sse: add_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_u32(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for u32x4 {
+         Self { sse: sub_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -118,7 +118,7 @@ impl Mul for u32x4 {
+         Self { sse: mul_32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmulq_u32(self.neon, rhs.neon) }}
+       } else {
+         let arr1: [u32; 4] = cast(self);
+@@ -198,7 +198,7 @@ impl BitAnd for u32x4 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -222,7 +222,7 @@ impl BitOr for u32x4 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -246,7 +246,7 @@ impl BitXor for u32x4 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -274,7 +274,7 @@ macro_rules! impl_shl_t_for_u32x4 {
+             Self { sse: shl_all_u32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u32x4_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_u32(self.neon, vmovq_n_s32(rhs as i32)) }}
+           } else {
+             let u = rhs as u64;
+@@ -306,7 +306,7 @@ macro_rules! impl_shr_t_for_u32x4 {
+             Self { sse: shr_all_u32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u32x4_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_u32(self.neon, vmovq_n_s32( -(rhs as i32))) }}
+           } else {
+             let u = rhs as u64;
+@@ -338,7 +338,7 @@ impl u32x4 {
+         Self { sse: cmp_eq_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vceqq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -358,7 +358,7 @@ impl u32x4 {
+         Self { sse: cmp_gt_mask_i32_m128i(self.sse,rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vcgtq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -378,7 +378,7 @@ impl u32x4 {
+         Self { sse: cmp_lt_mask_i32_m128i(self.sse,rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vcltq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -398,7 +398,7 @@ impl u32x4 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_u32(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -413,9 +413,9 @@ impl u32x4 {
+         Self { sse: max_u32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_u32(self.neon, rhs.neon) }}
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_u16(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u32; 4] = cast(self);
+@@ -437,7 +437,7 @@ impl u32x4 {
+         Self { sse: min_u32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_u32(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u32; 4] = cast(self);
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u64x2__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u64x2__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u64x2__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,105 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_u64x2__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/u64x2_.rs.orig   2024-10-02 14:15:46.878498280 +0000
++++ ../vendor/wide-0.7.15/src/u64x2_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u64x2 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -72,7 +72,7 @@ impl Add for u64x2 {
+         Self { sse: add_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u64x2_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_u64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for u64x2 {
+         Self { sse: sub_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u64x2_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vsubq_u64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -191,7 +191,7 @@ impl BitAnd for u64x2 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_u64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -213,7 +213,7 @@ impl BitOr for u64x2 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_u64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -235,7 +235,7 @@ impl BitXor for u64x2 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_u64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -261,7 +261,7 @@ macro_rules! impl_shl_t_for_u64x2 {
+             Self { sse: shl_all_u64_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u64x2_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_u64(self.neon, vmovq_n_s64(rhs as i64)) }}
+           } else {
+             let u = rhs as u64;
+@@ -291,7 +291,7 @@ macro_rules! impl_shr_t_for_u64x2 {
+             Self { sse: shr_all_u64_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u64x2_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+             unsafe {Self { neon: vshlq_u64(self.neon, vmovq_n_s64(-(rhs as i64))) }}
+           } else {
+             let u = rhs as u64;
+@@ -321,7 +321,7 @@ impl u64x2 {
+         Self { sse: cmp_eq_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u64x2_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vceqq_u64(self.neon, rhs.neon) } }
+       } else {
+         let s: [u64;2] = cast(self);
+@@ -339,7 +339,7 @@ impl u64x2 {
+     pick! {
+       if #[cfg(target_feature="sse4.2")] {
+         Self { sse: cmp_gt_mask_i64_m128i(self.sse, rhs.sse) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vcgtq_u64(self.neon, rhs.neon) }}
+       } else {
+         // u64x2_gt on WASM is not a thing. https://github.com/WebAssembly/simd/pull/414
+@@ -361,7 +361,7 @@ impl u64x2 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_u64(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u8x16__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u8x16__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.15_src_u8x16__.rs  Sat Feb 15 23:41:46 2025
@@ -0,0 +1,141 @@
+$NetBSD: patch-.._vendor_wide-0.7.15_src_u8x16__.rs,v 1.1 2025/02/15 23:41:46 he Exp $
+
+Do not try to use neon / SIMD on big-endian aarch64.
+
+--- ../vendor/wide-0.7.15/src/u8x16_.rs.orig   2024-10-02 14:18:31.196265523 +0000
++++ ../vendor/wide-0.7.15/src/u8x16_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u8x16 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for u8x16 {
+         Self { sse: add_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vaddq_u8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -106,7 +106,7 @@ impl Sub for u8x16 {
+         Self { sse: sub_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vsubq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -178,7 +178,7 @@ impl BitAnd for u8x16 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vandq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -214,7 +214,7 @@ impl BitOr for u8x16 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vorrq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -250,7 +250,7 @@ impl BitXor for u8x16 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: veorq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -290,7 +290,7 @@ impl u8x16 {
+         Self { sse: cmp_eq_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vceqq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -322,7 +322,7 @@ impl u8x16 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vbslq_u8(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -337,7 +337,7 @@ impl u8x16 {
+         Self { sse: max_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vmaxq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -369,7 +369,7 @@ impl u8x16 {
+         Self { sse: min_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vminq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -402,7 +402,7 @@ impl u8x16 {
+         Self { sse: add_saturating_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe {Self { neon: vqaddq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -434,7 +434,7 @@ impl u8x16 {
+         Self { sse: sub_saturating_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64", target_endian = "little"))]{
+         unsafe { Self { neon: vqsubq_u8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -468,7 +468,7 @@ impl u8x16 {
+             u8x16 { sse: unpack_low_i8_m128i(lhs.sse, rhs.sse) }
+         } else if #[cfg(target_feature = "simd128")] {
+           u8x16 { simd: u8x16_shuffle::<0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23>(lhs.simd, rhs.simd) }
+-        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
++        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64", target_endian = "little"))] {
+             let lhs = unsafe { vget_low_u8(lhs.neon) };
+             let rhs = unsafe { vget_low_u8(rhs.neon) };
+ 
+@@ -498,7 +498,7 @@ impl u8x16 {
+             u8x16 { sse: unpack_high_i8_m128i(lhs.sse, rhs.sse) }
+         } else if #[cfg(target_feature = "simd128")] {
+             u8x16 { simd: u8x16_shuffle::<8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>(lhs.simd, rhs.simd) }
+-        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
++        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64", target_endian = "little"))] {
+             let lhs = unsafe { vget_high_u8(lhs.neon) };
+             let rhs = unsafe { vget_high_u8(rhs.neon) };
+ 
+@@ -528,7 +528,7 @@ impl u8x16 {
+             u8x16 { sse: pack_i16_to_u8_m128i(lhs.sse, rhs.sse) }
+         } else if #[cfg(target_feature = "simd128")] {
+             u8x16 { simd: u8x16_narrow_i16x8(lhs.simd, rhs.simd) }
+-        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
++        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64", target_endian = "little"))] {
+             let lhs = unsafe { vqmovun_s16(lhs.neon) };
+             let rhs = unsafe { vqmovun_s16(rhs.neon) };
+             u8x16 { neon: unsafe { vcombine_u8(lhs, rhs) } }
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f32x4__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f32x4__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f32x4__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,258 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_f32x4__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/f32x4_.rs.orig   2025-02-15 21:29:12.400873580 +0000
++++ ../vendor/wide-0.7.26/src/f32x4_.rs
+@@ -23,7 +23,7 @@ pick! {
+         u32x4_all_true(f32x4_eq(self.simd, other.simd))
+       }
+     }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -97,7 +97,7 @@ impl Add for f32x4 {
+         Self { sse: add_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_f32(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -121,7 +121,7 @@ impl Sub for f32x4 {
+         Self { sse: sub_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -145,7 +145,7 @@ impl Mul for f32x4 {
+         Self { sse: mul_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmulq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -169,7 +169,7 @@ impl Div for f32x4 {
+         Self { sse: div_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_div(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vdivq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -265,7 +265,7 @@ impl BitAnd for f32x4 {
+         Self { sse: bitand_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(self.neon), vreinterpretq_u32_f32(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -289,7 +289,7 @@ impl BitOr for f32x4 {
+         Self { sse: bitor_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(self.neon), vreinterpretq_u32_f32(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -313,7 +313,7 @@ impl BitXor for f32x4 {
+         Self { sse: bitxor_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(self.neon), vreinterpretq_u32_f32(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -337,7 +337,7 @@ impl CmpEq for f32x4 {
+         Self { sse: cmp_eq_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vceqq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -361,7 +361,7 @@ impl CmpGe for f32x4 {
+         Self { sse: cmp_ge_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_ge(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcgeq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -385,7 +385,7 @@ impl CmpGt for f32x4 {
+         Self { sse: cmp_gt_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcgtq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -409,7 +409,7 @@ impl CmpNe for f32x4 {
+         Self { sse: cmp_neq_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_ne(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(self.neon, rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -433,7 +433,7 @@ impl CmpLe for f32x4 {
+         Self { sse: cmp_le_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_le(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcleq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -457,7 +457,7 @@ impl CmpLt for f32x4 {
+         Self { sse: cmp_lt_mask_m128(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vcltq_f32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -497,7 +497,7 @@ impl f32x4 {
+     pick! {
+       if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vabsq_f32(self.neon) }}
+       } else {
+         let non_sign_bits = f32x4::from(f32::from_bits(i32::MAX as u32));
+@@ -519,7 +519,7 @@ impl f32x4 {
+         Self {
+           simd: f32x4_pmax(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -558,7 +558,7 @@ impl f32x4 {
+             f32x4_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxnmq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -584,7 +584,7 @@ impl f32x4 {
+         Self {
+           simd: f32x4_pmin(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -623,7 +623,7 @@ impl f32x4 {
+             f32x4_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminnmq_f32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -643,7 +643,7 @@ impl f32x4 {
+         Self { sse: cmp_unord_mask_m128(self.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_ne(self.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(self.neon, self.neon))) }}
+       } else {
+         Self { arr: [
+@@ -688,7 +688,7 @@ impl f32x4 {
+         mask.blend(self, f)
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_nearest(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vrndnq_f32(self.neon) }}
+       } else {
+         // Note(Lokathor): This software fallback is probably very slow compared
+@@ -752,7 +752,7 @@ impl f32x4 {
+         flip_to_max ^ cast
+       } else if #[cfg(target_feature="simd128")] {
+         cast(Self { simd: i32x4_trunc_sat_f32x4(f32x4_nearest(self.simd)) })
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         cast(unsafe {Self { neon: vreinterpretq_f32_s32(vcvtnq_s32_f32(self.neon)) }})
+       } else {
+         let rounded: [f32; 4] = cast(self.round());
+@@ -797,7 +797,7 @@ impl f32x4 {
+         flip_to_max ^ cast
+       } else if #[cfg(target_feature="simd128")] {
+         cast(Self { simd: i32x4_trunc_sat_f32x4(self.simd) })
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         cast(unsafe {Self { neon: vreinterpretq_f32_s32(vcvtq_s32_f32(self.neon)) }})
+       } else {
+         let n: [f32;4] = cast(self);
+@@ -1170,7 +1170,7 @@ impl f32x4 {
+         Self { sse: reciprocal_m128(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_div(f32x4_splat(1.0), self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vdivq_f32(vdupq_n_f32(1.0), self.neon) }}
+       } else {
+         Self { arr: [
+@@ -1190,7 +1190,7 @@ impl f32x4 {
+         Self { sse: reciprocal_sqrt_m128(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_div(f32x4_splat(1.0), f32x4_sqrt(self.simd)) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vdivq_f32(vdupq_n_f32(1.0), vsqrtq_f32(self.neon)) }}
+       } else if #[cfg(feature="std")] {
+         Self { arr: [
+@@ -1217,7 +1217,7 @@ impl f32x4 {
+         Self { sse: sqrt_m128(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_sqrt(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsqrtq_f32(self.neon) }}
+       } else if #[cfg(feature="std")] {
+         Self { arr: [
+@@ -1245,7 +1245,7 @@ impl f32x4 {
+         move_mask_m128(self.sse)
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
+@@ -1586,7 +1586,7 @@ impl f32x4 {
+         Self { sse: convert_to_m128_from_i32_m128i(v.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f32x4_convert_i32x4(v.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         Self { neon: unsafe { vcvtq_f32_s32(v.neon) }}
+       } else {
+         Self { arr: [
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f64x2__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f64x2__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_f64x2__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,231 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_f64x2__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/f64x2_.rs.orig   2025-02-15 21:33:00.553234907 +0000
++++ ../vendor/wide-0.7.26/src/f64x2_.rs
+@@ -23,7 +23,7 @@ pick! {
+         u64x2_all_true(f64x2_eq(self.simd, other.simd))
+       }
+     }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -100,7 +100,7 @@ impl Add for f64x2 {
+         Self { sse: add_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_f64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -122,7 +122,7 @@ impl Sub for f64x2 {
+         Self { sse: sub_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vsubq_f64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -144,7 +144,7 @@ impl Mul for f64x2 {
+         Self { sse: mul_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmulq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -166,7 +166,7 @@ impl Div for f64x2 {
+         Self { sse: div_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_div(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vdivq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -260,7 +260,7 @@ impl BitAnd for f64x2 {
+         Self { sse: bitand_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -282,7 +282,7 @@ impl BitOr for f64x2 {
+         Self { sse: bitor_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -304,7 +304,7 @@ impl BitXor for f64x2 {
+         Self { sse: bitxor_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
+       } else {
+         Self { arr: [
+@@ -326,7 +326,7 @@ impl CmpEq for f64x2 {
+         Self { sse: cmp_eq_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -348,7 +348,7 @@ impl CmpGe for f64x2 {
+         Self { sse: cmp_ge_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_ge(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcgeq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -372,7 +372,7 @@ impl CmpGt for f64x2 {
+         Self { sse: cmp_gt_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcgtq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -394,7 +394,7 @@ impl CmpNe for f64x2 {
+         Self { sse: cmp_neq_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_ne(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, rhs.neon)) }.not() }
+       } else {
+         Self { arr: [
+@@ -416,7 +416,7 @@ impl CmpLe for f64x2 {
+         Self { sse: cmp_le_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_le(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcleq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -438,7 +438,7 @@ impl CmpLt for f64x2 {
+         Self { sse: cmp_lt_mask_m128d(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vcltq_f64(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -475,7 +475,7 @@ impl f64x2 {
+     pick! {
+       if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vabsq_f64(self.neon) }}
+       } else {
+         let non_sign_bits = f64x2::from(f64::from_bits(i64::MAX as u64));
+@@ -497,7 +497,7 @@ impl f64x2 {
+         Self {
+           simd: f64x2_pmax(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -534,7 +534,7 @@ impl f64x2 {
+             f64x2_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxnmq_f64(self.neon, rhs.neon) }}
+             } else {
+         Self { arr: [
+@@ -558,7 +558,7 @@ impl f64x2 {
+         Self {
+           simd: f64x2_pmin(self.simd, rhs.simd),
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -595,7 +595,7 @@ impl f64x2 {
+             f64x2_ne(self.simd, self.simd), // NaN check
+           )
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminnmq_f64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -614,7 +614,7 @@ impl f64x2 {
+         Self { sse: cmp_unord_mask_m128d(self.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_ne(self.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, self.neon)) }.not() }
+       } else {
+         Self { arr: [
+@@ -1229,7 +1229,7 @@ impl f64x2 {
+         Self { sse: sqrt_m128d(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_sqrt(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsqrtq_f64(self.neon) }}
+       } else if #[cfg(feature="std")] {
+         Self { arr: [
+@@ -1252,7 +1252,7 @@ impl f64x2 {
+         move_mask_m128d(self.sse)
+       } else if #[cfg(target_feature="simd128")] {
+         u64x2_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe
+         {
+           let e = vreinterpretq_u64_f64(self.neon);
+@@ -1400,7 +1400,7 @@ impl f64x2 {
+       } else if #[cfg(any(target_feature="sse2", target_feature="simd128"))] {
+         let a: [f64;2] = cast(self);
+         a.iter().sum()
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { vgetq_lane_f64(self.neon,0) + vgetq_lane_f64(self.neon,1) }
+       } else {
+         self.arr.iter().sum()
+@@ -1617,7 +1617,7 @@ impl f64x2 {
+         Self { sse: convert_to_m128d_from_lower2_i32_m128i(v.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: f64x2_convert_low_i32x4(v.simd)}
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         Self { neon: unsafe { vcvtq_f64_s64(vmovl_s32(vget_low_s32(v.neon))) }}
+       } else {
+         Self { arr: [
+@@ -1646,7 +1646,7 @@ impl Not for f64x2 {
+         Self { sse: self.sse.not() }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_not(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_f64_u32(vmvnq_u32(vreinterpretq_u32_f64(self.neon))) }}
+       } else {
+         Self { arr: [
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i16x8__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i16x8__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i16x8__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,303 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_i16x8__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/i16x8_.rs.orig   2025-02-15 21:35:09.865330384 +0000
++++ ../vendor/wide-0.7.26/src/i16x8_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i16x8 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for i16x8 {
+         Self { sse: add_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_s16(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -98,7 +98,7 @@ impl Sub for i16x8 {
+         Self { sse: sub_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -126,7 +126,7 @@ impl Mul for i16x8 {
+         Self { sse: mul_i16_keep_low_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmulq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -208,7 +208,7 @@ impl BitAnd for i16x8 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -236,7 +236,7 @@ impl BitOr for i16x8 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -264,7 +264,7 @@ impl BitXor for i16x8 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -296,7 +296,7 @@ macro_rules! impl_shl_t_for_i16x8 {
+             Self { sse: shl_all_u16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i16x8_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_s16(self.neon, vmovq_n_s16(rhs as i16)) }}
+           } else {
+             let u = rhs as u64;
+@@ -332,7 +332,7 @@ macro_rules! impl_shr_t_for_i16x8 {
+             Self { sse: shr_all_i16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i16x8_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_s16(self.neon, vmovq_n_s16( -(rhs as i16))) }}
+           } else {
+             let u = rhs as u64;
+@@ -364,7 +364,7 @@ impl CmpEq for i16x8 {
+         Self { sse: cmp_eq_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s16_u16(vceqq_s16(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -392,7 +392,7 @@ impl CmpGt for i16x8 {
+         Self { sse: cmp_gt_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s16_u16(vcgtq_s16(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -420,7 +420,7 @@ impl CmpLt for i16x8 {
+         Self { sse: cmp_lt_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s16_u16(vcltq_s16(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -453,7 +453,7 @@ impl i16x8 {
+         move_mask_i8_m128i( pack_i16_to_i8_m128i(self.sse,self.sse)) & 0xff
+       } else if #[cfg(target_feature="simd128")] {
+         i16x8_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
+@@ -487,7 +487,7 @@ impl i16x8 {
+         (move_mask_i8_m128i(self.sse) & 0b1010101010101010) != 0
+       } else if #[cfg(target_feature="simd128")] {
+         u16x8_bitmask(self.simd) != 0
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         unsafe {
+           vminvq_s16(self.neon) < 0
+         }
+@@ -506,7 +506,7 @@ impl i16x8 {
+         (move_mask_i8_m128i(self.sse) & 0b1010101010101010) == 0b1010101010101010
+       } else if #[cfg(target_feature="simd128")] {
+         u16x8_bitmask(self.simd) == 0b11111111
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         unsafe {
+           vmaxvq_s16(self.neon) < 0
+         }
+@@ -558,7 +558,7 @@ impl i16x8 {
+         use core::arch::wasm32::*;
+ 
+         i16x8 { simd: i16x8_narrow_i32x4(v.a.simd, v.b.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         use core::arch::aarch64::*;
+ 
+         unsafe {
+@@ -628,7 +628,7 @@ impl i16x8 {
+         unsafe { Self { sse: load_unaligned_m128i( &*(input.as_ptr() as * const [u8;16]) ) } }
+       } else if #[cfg(target_feature="simd128")] {
+         unsafe { Self { simd: v128_load(input.as_ptr() as *const v128 ) } }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vld1q_s16( input.as_ptr() as *const i16 ) } }
+       } else {
+         // 2018 edition doesn't have try_into
+@@ -645,7 +645,7 @@ impl i16x8 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_s16(vreinterpretq_u16_s16(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -672,7 +672,7 @@ impl i16x8 {
+         let lo16 = shr_imm_u32_m128i::<16>(sum32);
+         let sum16 = add_i16_m128i(sum32, lo16);
+         extract_i16_as_i32_m128i::<0>(sum16) as i16
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { vaddvq_s16(self.neon) }
+       } else {
+         let arr: [i16; 8] = cast(self);
+@@ -703,7 +703,7 @@ impl i16x8 {
+           let lo16 = shr_imm_u32_m128i::<16>(sum32);
+           let sum16 = min_i16_m128i(sum32, lo16);
+           extract_i16_as_i32_m128i::<0>(sum16) as i16
+-        } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++        } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+           unsafe { vminvq_s16(self.neon) }
+         } else {
+         let arr: [i16; 8] = cast(self);
+@@ -734,7 +734,7 @@ impl i16x8 {
+           let lo16 = shr_imm_u32_m128i::<16>(sum32);
+           let sum16 = max_i16_m128i(sum32, lo16);
+           extract_i16_as_i32_m128i::<0>(sum16) as i16
+-        } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++        } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+           unsafe { vmaxvq_s16(self.neon) }
+         } else {
+         let arr: [i16; 8] = cast(self);
+@@ -763,7 +763,7 @@ impl i16x8 {
+         Self { sse: abs_i16_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vabsq_s16(self.neon) }}
+       } else {
+         let arr: [i16; 8] = cast(self);
+@@ -793,7 +793,7 @@ impl i16x8 {
+         u16x8 { sse: abs_i16_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         u16x8 { simd: i16x8_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {u16x8 { neon: vreinterpretq_u16_s16(vabsq_s16(self.neon)) }}
+       } else {
+         let arr: [i16; 8] = cast(self);
+@@ -820,7 +820,7 @@ impl i16x8 {
+         Self { sse: max_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_s16(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(rhs, self)
+@@ -835,7 +835,7 @@ impl i16x8 {
+         Self { sse: min_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_s16(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(self, rhs)
+@@ -851,7 +851,7 @@ impl i16x8 {
+         Self { sse: add_saturating_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vqaddq_s16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -875,7 +875,7 @@ impl i16x8 {
+         Self { sse: sub_saturating_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vqsubq_s16(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -904,7 +904,7 @@ impl i16x8 {
+         i32x4 { sse:  mul_i16_horizontal_add_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         i32x4 { simd: i32x4_dot_i16x8(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {
+           let pl = vmull_s16(vget_low_s16(self.neon),  vget_low_s16(rhs.neon));
+           let ph = vmull_high_s16(self.neon, rhs.neon);
+@@ -947,7 +947,7 @@ impl i16x8 {
+         Self { sse: s }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_q15mulr_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vqrdmulhq_s16(self.neon, rhs.neon) } }
+       } else {
+         // compiler does a surprisingly good job of vectorizing this
+@@ -972,7 +972,7 @@ impl i16x8 {
+     pick! {
+       if #[cfg(target_feature="sse2")] {
+         Self { sse: mul_i16_keep_high_m128i(lhs.sse, rhs.sse) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         let lhs_low = unsafe { vget_low_s16(lhs.neon) };
+         let rhs_low = unsafe { vget_low_s16(rhs.neon) };
+ 
+@@ -1019,7 +1019,7 @@ impl i16x8 {
+           a: i32x4 { sse:unpack_low_i16_m128i(low, high) },
+           b: i32x4 { sse:unpack_high_i16_m128i(low, high) }
+         }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+          let lhs_low = unsafe { vget_low_s16(self.neon) };
+          let rhs_low = unsafe { vget_low_s16(rhs.neon) };
+ 
+@@ -1081,7 +1081,7 @@ impl i16x8 {
+           i16x8 { sse: unpack_low_i64_m128i(b4, b8) },
+           i16x8 { sse: unpack_high_i64_m128i(b4, b8) } ,
+         ]
+-     } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++     } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+ 
+           #[inline] fn vtrq32(a : int16x8_t, b : int16x8_t) -> (int16x8_t, int16x8_t)
+           {
+@@ -1209,7 +1209,7 @@ impl i16x8 {
+         Self { sse: s }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i16x8_q15mulr_sat(self.simd, i16x8_splat(rhs)) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vqrdmulhq_n_s16(self.neon, rhs) } }
+       } else {
+         // compiler does a surprisingly good job of vectorizing this
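The pattern applied throughout these patches is the same: every pick!/cfg branch that selects the aarch64 NEON path gains a target_endian = "little" condition, so that on big-endian aarch64 (aarch64_eb) the crate compiles its portable array fallback instead of the NEON code. As a rough illustration only (not part of the committed patches; the function name and array-based signature below are made up for the example), the equivalent gating with plain cfg attributes looks like this:

    // Illustrative sketch: the NEON variant is compiled only on
    // little-endian aarch64 with NEON enabled; everywhere else,
    // including aarch64_eb, the portable fallback is used instead.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon", target_endian = "little"))]
    fn add_i16x8(a: [i16; 8], b: [i16; 8]) -> [i16; 8] {
        use core::arch::aarch64::*;
        unsafe {
            // Load, add lane-wise with NEON, store back to a plain array.
            let r = vaddq_s16(vld1q_s16(a.as_ptr()), vld1q_s16(b.as_ptr()));
            let mut out = [0i16; 8];
            vst1q_s16(out.as_mut_ptr(), r);
            out
        }
    }

    #[cfg(not(all(target_arch = "aarch64", target_feature = "neon", target_endian = "little")))]
    fn add_i16x8(a: [i16; 8], b: [i16; 8]) -> [i16; 8] {
        // Scalar fallback, element by element (wrapping, like the SIMD add).
        let mut out = [0i16; 8];
        for i in 0..8 {
            out[i] = a[i].wrapping_add(b[i]);
        }
        out
    }

In the vendored wide sources the same effect is achieved inside the pick! chains shown in the hunks above; note that the struct's neon field itself is only defined under the same condition (see the @@ -25 hunks), which is why every method's NEON branch needs the extra predicate as well.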
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i32x4__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i32x4__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i32x4__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,204 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_i32x4__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/i32x4_.rs.orig   2025-02-15 21:36:46.085631577 +0000
++++ ../vendor/wide-0.7.26/src/i32x4_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i32x4 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for i32x4 {
+         Self { sse: add_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_s32(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for i32x4 {
+         Self { sse: sub_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -118,7 +118,7 @@ impl Mul for i32x4 {
+         Self { sse: mul_32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmulq_s32(self.neon, rhs.neon) }}
+       } else {
+         let arr1: [i32; 4] = cast(self);
+@@ -198,7 +198,7 @@ impl BitAnd for i32x4 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -222,7 +222,7 @@ impl BitOr for i32x4 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -246,7 +246,7 @@ impl BitXor for i32x4 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_s32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -274,7 +274,7 @@ macro_rules! impl_shl_t_for_i32x4 {
+             Self { sse: shl_all_u32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i32x4_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_s32(self.neon, vmovq_n_s32(rhs as i32)) }}
+           } else {
+             let u = rhs as u64;
+@@ -306,7 +306,7 @@ macro_rules! impl_shr_t_for_i32x4 {
+             Self { sse: shr_all_i32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i32x4_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_s32(self.neon, vmovq_n_s32( -(rhs as i32))) }}
+           } else {
+             let u = rhs as u64;
+@@ -340,7 +340,7 @@ impl Shr<i32x4> for i32x4 {
+         // mask the shift count to 31 to have same behavior on all platforms
+         let shift_by = bitand_m128i(rhs.sse, set_splat_i32_m128i(31));
+         Self { sse: shr_each_i32_m128i(self.sse, shift_by) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {
+           // mask the shift count to 31 to have same behavior on all platforms
+           // no right shift, have to pass negative value to left shift on neon
+@@ -377,7 +377,7 @@ impl Shl<i32x4> for i32x4 {
+         // mask the shift count to 31 to have same behavior on all platforms
+         let shift_by = bitand_m128i(rhs.sse, set_splat_i32_m128i(31));
+         Self { sse: shl_each_u32_m128i(self.sse, shift_by) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {
+           // mask the shift count to 31 to have same behavior on all platforms
+           let shift_by = vandq_s32(rhs.neon, vmovq_n_s32(31));
+@@ -407,7 +407,7 @@ impl CmpEq for i32x4 {
+         Self { sse: cmp_eq_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s32_u32(vceqq_s32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -431,7 +431,7 @@ impl CmpGt for i32x4 {
+         Self { sse: cmp_gt_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s32_u32(vcgtq_s32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -455,7 +455,7 @@ impl CmpLt for i32x4 {
+         Self { sse: cmp_lt_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s32_u32(vcltq_s32(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -483,7 +483,7 @@ impl i32x4 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_s32(vreinterpretq_u32_s32(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -498,7 +498,7 @@ impl i32x4 {
+         Self { sse: abs_i32_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vabsq_s32(self.neon) }}
+       } else {
+         let arr: [i32; 4] = cast(self);
+@@ -520,7 +520,7 @@ impl i32x4 {
+         u32x4 { sse: abs_i32_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4 { simd: i32x4_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {u32x4 { neon: vreinterpretq_u32_s32(vabsq_s32(self.neon)) }}
+       } else {
+         let arr: [i32; 4] = cast(self);
+@@ -590,7 +590,7 @@ impl i32x4 {
+         Self { sse: min_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i32x4_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_s32(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(self, rhs)
+@@ -605,7 +605,7 @@ impl i32x4 {
+         cast(convert_to_m128_from_i32_m128i(self.sse))
+       } else if #[cfg(target_feature="simd128")] {
+         cast(Self { simd: f32x4_convert_i32x4(self.simd) })
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         cast(unsafe {Self { neon: vreinterpretq_s32_f32(vcvtq_f32_s32(self.neon)) }})
+       } else {
+         let arr: [i32; 4] = cast(self);
+@@ -628,7 +628,7 @@ impl i32x4 {
+         move_mask_m128(cast(self.sse))
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
+@@ -659,7 +659,7 @@ impl i32x4 {
+         move_mask_m128(cast(self.sse)) != 0
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4_bitmask(self.simd) != 0
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         // some lanes are negative
+         unsafe {
+           vminvq_s32(self.neon) < 0
+@@ -680,7 +680,7 @@ impl i32x4 {
+         move_mask_m128(cast(self.sse)) == 0b1111
+       } else if #[cfg(target_feature="simd128")] {
+         u32x4_bitmask(self.simd) == 0b1111
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         // all lanes are negative
+         unsafe {
+           vmaxvq_s32(self.neon) < 0
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i64x2__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i64x2__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i64x2__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,123 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_i64x2__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/i64x2_.rs.orig   2025-02-15 21:38:14.017942779 +0000
++++ ../vendor/wide-0.7.26/src/i64x2_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i64x2 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -72,7 +72,7 @@ impl Add for i64x2 {
+         Self { sse: add_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_s64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for i64x2 {
+         Self { sse: sub_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vsubq_s64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -191,7 +191,7 @@ impl BitAnd for i64x2 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_s64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -213,7 +213,7 @@ impl BitOr for i64x2 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_s64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -235,7 +235,7 @@ impl BitXor for i64x2 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_s64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -261,7 +261,7 @@ macro_rules! impl_shl_t_for_i64x2 {
+             Self { sse: shl_all_u64_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: i64x2_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_s64(self.neon, vmovq_n_s64(rhs as i64)) }}
+           } else {
+             let u = rhs as u64;
+@@ -314,7 +314,7 @@ impl CmpEq for i64x2 {
+         Self { sse: cmp_eq_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s64_u64(vceqq_s64(self.neon, rhs.neon)) }}
+       } else {
+         let s: [i64;2] = cast(self);
+@@ -338,7 +338,7 @@ impl CmpGt for i64x2 {
+         Self { sse: cmp_gt_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s64_u64(vcgtq_s64(self.neon, rhs.neon)) }}
+       } else {
+         let s: [i64;2] = cast(self);
+@@ -362,7 +362,7 @@ impl CmpLt for i64x2 {
+         Self { sse: !cmp_gt_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s64_u64(vcltq_s64(self.neon, rhs.neon)) }}
+       } else {
+         let s: [i64;2] = cast(self);
+@@ -390,7 +390,7 @@ impl i64x2 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_s64(vreinterpretq_u64_s64(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -405,7 +405,7 @@ impl i64x2 {
+       // x86 doesn't have this builtin
+       if #[cfg(target_feature="simd128")] {
+         Self { simd: i64x2_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vabsq_s64(self.neon) }}
+       } else {
+         let arr: [i64; 2] = cast(self);
+@@ -425,7 +425,7 @@ impl i64x2 {
+       // x86 doesn't have this builtin
+       if #[cfg(target_feature="simd128")] {
+         u64x2 { simd: i64x2_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {u64x2 { neon: vreinterpretq_u64_s64(vabsq_s64(self.neon)) }}
+       } else {
+         let arr: [i64; 2] = cast(self);
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i8x16__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i8x16__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_i8x16__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,213 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_i8x16__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/i8x16_.rs.orig   2025-02-15 21:39:26.050484609 +0000
++++ ../vendor/wide-0.7.26/src/i8x16_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for i8x16 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for i8x16 {
+         Self { sse: add_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_s8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -106,7 +106,7 @@ impl Sub for i8x16 {
+         Self { sse: sub_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -178,7 +178,7 @@ impl BitAnd for i8x16 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -214,7 +214,7 @@ impl BitOr for i8x16 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -250,7 +250,7 @@ impl BitXor for i8x16 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -286,7 +286,7 @@ impl CmpEq for i8x16 {
+         Self { sse: cmp_eq_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s8_u8(vceqq_s8(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -322,7 +322,7 @@ impl CmpGt for i8x16 {
+         Self { sse: cmp_gt_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s8_u8(vcgtq_s8(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -358,7 +358,7 @@ impl CmpLt for i8x16 {
+         Self { sse: cmp_lt_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_lt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vreinterpretq_s8_u8(vcltq_s8(self.neon, rhs.neon)) }}
+       } else {
+         Self { arr: [
+@@ -400,7 +400,7 @@ impl i8x16 {
+         i8x16 { sse: pack_i16_to_i8_m128i( extract_m128i_from_m256i::<0>(v.avx2), extract_m128i_from_m256i::<1>(v.avx2))  }
+       } else if #[cfg(target_feature="sse2")] {
+         i8x16 { sse: pack_i16_to_i8_m128i( v.a.sse, v.b.sse ) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         use core::arch::aarch64::*;
+ 
+         unsafe {
+@@ -487,7 +487,7 @@ impl i8x16 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_s8(vreinterpretq_u8_s8(self.neon), t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -502,7 +502,7 @@ impl i8x16 {
+         Self { sse: abs_i8_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vabsq_s8(self.neon) }}
+       } else {
+         let arr: [i8; 16] = cast(self);
+@@ -536,7 +536,7 @@ impl i8x16 {
+         u8x16 { sse: abs_i8_m128i(self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         u8x16 { simd: i8x16_abs(self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { u8x16 { neon: vreinterpretq_u8_s8(vabsq_s8(self.neon)) }}
+       } else {
+         let arr: [i8; 16] = cast(self);
+@@ -571,7 +571,7 @@ impl i8x16 {
+         Self { sse: max_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_s8(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(rhs, self)
+@@ -586,7 +586,7 @@ impl i8x16 {
+         Self { sse: min_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_s8(self.neon, rhs.neon) }}
+       } else {
+         self.cmp_lt(rhs).blend(self, rhs)
+@@ -604,7 +604,7 @@ impl i8x16 {
+         unsafe { Self { sse: load_unaligned_m128i( &*(input.as_ptr() as * const [u8;16]) ) } }
+       } else if #[cfg(target_feature="simd128")] {
+         unsafe { Self { simd: v128_load(input.as_ptr() as *const v128 ) } }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vld1q_s8( input.as_ptr() as *const i8 ) } }
+       } else {
+         // 2018 edition doesn't have try_into
+@@ -621,7 +621,7 @@ impl i8x16 {
+         move_mask_i8_m128i(self.sse)
+       } else if #[cfg(target_feature="simd128")] {
+         i8x16_bitmask(self.simd) as i32
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe
+         {
+           // set all to 1 if top bit is set, else 0
+@@ -667,7 +667,7 @@ impl i8x16 {
+         move_mask_i8_m128i(self.sse) != 0
+       } else if #[cfg(target_feature="simd128")] {
+         u8x16_bitmask(self.simd) != 0
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         unsafe {
+           vminvq_s8(self.neon) < 0
+         }
+@@ -685,7 +685,7 @@ impl i8x16 {
+         move_mask_i8_m128i(self.sse) == 0b1111_1111_1111_1111
+       } else if #[cfg(target_feature="simd128")] {
+         u8x16_bitmask(self.simd) == 0b1111_1111_1111_1111
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         unsafe {
+           vmaxvq_s8(self.neon) < 0
+         }
+@@ -708,7 +708,7 @@ impl i8x16 {
+         Self { sse: shuffle_av_i8z_all_m128i(self.sse, add_saturating_u8_m128i(rhs.sse, set_splat_i8_m128i(0x70))) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_swizzle(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         unsafe { Self { neon: vqtbl1q_s8(self.neon, vreinterpretq_u8_s8(rhs.neon)) } }
+       } else {
+         let idxs = rhs.to_array();
+@@ -741,7 +741,7 @@ impl i8x16 {
+         Self { sse: shuffle_av_i8z_all_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_swizzle(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))] {
+         unsafe { Self { neon: vqtbl1q_s8(self.neon, vreinterpretq_u8_s8(rhs.neon)) } }
+       } else {
+         let idxs = rhs.to_array();
+@@ -774,7 +774,7 @@ impl i8x16 {
+         Self { sse: add_saturating_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vqaddq_s8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -806,7 +806,7 @@ impl i8x16 {
+         Self { sse: sub_saturating_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: i8x16_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vqsubq_s8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u16x8__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u16x8__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u16x8__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,141 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_u16x8__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/u16x8_.rs.orig   2025-02-15 21:41:16.297121103 +0000
++++ ../vendor/wide-0.7.26/src/u16x8_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u16x8 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+       use core::arch::aarch64::*;
+       #[repr(C)]
+       #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for u16x8 {
+         Self { sse: add_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_u16(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -98,7 +98,7 @@ impl Sub for u16x8 {
+         Self { sse: sub_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -126,7 +126,7 @@ impl Mul for u16x8 {
+         Self { sse: mul_i16_keep_low_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmulq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -208,7 +208,7 @@ impl BitAnd for u16x8 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -236,7 +236,7 @@ impl BitOr for u16x8 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -264,7 +264,7 @@ impl BitXor for u16x8 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -296,7 +296,7 @@ macro_rules! impl_shl_t_for_u16x8 {
+             Self { sse: shl_all_u16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u16x8_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_u16(self.neon, vmovq_n_s16(rhs as i16)) }}
+           } else {
+             let u = rhs as u64;
+@@ -332,7 +332,7 @@ macro_rules! impl_shr_t_for_u16x8 {
+             Self { sse: shr_all_u16_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u16x8_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_u16(self.neon, vmovq_n_s16( -(rhs as i16))) }}
+           } else {
+             let u = rhs as u64;
+@@ -368,7 +368,7 @@ impl u16x8 {
+         Self { sse: cmp_eq_mask_i16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vceqq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -392,7 +392,7 @@ impl u16x8 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_u16(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -407,7 +407,7 @@ impl u16x8 {
+         Self { sse: max_u16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_u16(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u16; 8] = cast(self);
+@@ -433,7 +433,7 @@ impl u16x8 {
+         Self { sse: min_u16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_u16(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u16; 8] = cast(self);
+@@ -460,7 +460,7 @@ impl u16x8 {
+         Self { sse: add_saturating_u16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vqaddq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -484,7 +484,7 @@ impl u16x8 {
+         Self { sse: sub_saturating_u16_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u16x8_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vqsubq_u16(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u32x4__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u32x4__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u32x4__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,153 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_u32x4__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/u32x4_.rs.orig   2025-02-15 21:42:28.717681165 +0000
++++ ../vendor/wide-0.7.26/src/u32x4_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u32x4 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for u32x4 {
+         Self { sse: add_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_u32(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for u32x4 {
+         Self { sse: sub_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -118,7 +118,7 @@ impl Mul for u32x4 {
+         Self { sse: mul_32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_mul(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmulq_u32(self.neon, rhs.neon) }}
+       } else {
+         let arr1: [u32; 4] = cast(self);
+@@ -198,7 +198,7 @@ impl BitAnd for u32x4 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -222,7 +222,7 @@ impl BitOr for u32x4 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -246,7 +246,7 @@ impl BitXor for u32x4 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -274,7 +274,7 @@ macro_rules! impl_shl_t_for_u32x4 {
+             Self { sse: shl_all_u32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u32x4_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_u32(self.neon, vmovq_n_s32(rhs as i32)) }}
+           } else {
+             let u = rhs as u64;
+@@ -306,7 +306,7 @@ macro_rules! impl_shr_t_for_u32x4 {
+             Self { sse: shr_all_u32_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u32x4_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_u32(self.neon, vmovq_n_s32( -(rhs as i32))) }}
+           } else {
+             let u = rhs as u64;
+@@ -339,7 +339,7 @@ impl Shr<u32x4> for u32x4 {
+         // mask the shift count to 31 to have same behavior on all platforms
+         let shift_by = bitand_m128i(rhs.sse, set_splat_i32_m128i(31));
+         Self { sse: shr_each_u32_m128i(self.sse, shift_by) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {
+           // mask the shift count to 31 to have same behavior on all platforms
+           // no right shift, have to pass negative value to left shift on neon
+@@ -375,7 +375,7 @@ impl Shl<u32x4> for u32x4 {
+         // mask the shift count to 31 to have same behavior on all platforms
+         let shift_by = bitand_m128i(rhs.sse, set_splat_i32_m128i(31));
+         Self { sse: shl_each_u32_m128i(self.sse, shift_by) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {
+           // mask the shift count to 31 to have same behavior on all platforms
+           let shift_by = vreinterpretq_s32_u32(vandq_u32(rhs.neon, vmovq_n_u32(31)));
+@@ -409,7 +409,7 @@ impl u32x4 {
+         Self { sse: cmp_eq_mask_i32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vceqq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -431,7 +431,7 @@ impl u32x4 {
+         Self { sse: cmp_gt_mask_i32_m128i((self ^ h).sse, (rhs ^ h).sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_gt(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vcgtq_u32(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -458,7 +458,7 @@ impl u32x4 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_u32(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -473,9 +473,9 @@ impl u32x4 {
+         Self { sse: max_u32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_u32(self.neon, rhs.neon) }}
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_u16(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u32; 4] = cast(self);
+@@ -497,7 +497,7 @@ impl u32x4 {
+         Self { sse: min_u32_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u32x4_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_u32(self.neon, rhs.neon) }}
+       } else {
+         let arr: [u32; 4] = cast(self);
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u64x2__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u64x2__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u64x2__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,105 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_u64x2__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/u64x2_.rs.orig   2025-02-15 21:43:43.641452364 +0000
++++ ../vendor/wide-0.7.26/src/u64x2_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u64x2 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -72,7 +72,7 @@ impl Add for u64x2 {
+         Self { sse: add_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u64x2_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_u64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -94,7 +94,7 @@ impl Sub for u64x2 {
+         Self { sse: sub_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u64x2_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vsubq_u64(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -191,7 +191,7 @@ impl BitAnd for u64x2 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_u64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -213,7 +213,7 @@ impl BitOr for u64x2 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_u64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -235,7 +235,7 @@ impl BitXor for u64x2 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_u64(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -261,7 +261,7 @@ macro_rules! impl_shl_t_for_u64x2 {
+             Self { sse: shl_all_u64_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u64x2_shl(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_u64(self.neon, vmovq_n_s64(rhs as i64)) }}
+           } else {
+             let u = rhs as u64;
+@@ -291,7 +291,7 @@ macro_rules! impl_shr_t_for_u64x2 {
+             Self { sse: shr_all_u64_m128i(self.sse, shift) }
+           } else if #[cfg(target_feature="simd128")] {
+             Self { simd: u64x2_shr(self.simd, rhs as u32) }
+-          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++          } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+             unsafe {Self { neon: vshlq_u64(self.neon, vmovq_n_s64(-(rhs as i64))) }}
+           } else {
+             let u = rhs as u64;
+@@ -321,7 +321,7 @@ impl u64x2 {
+         Self { sse: cmp_eq_mask_i64_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u64x2_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vceqq_u64(self.neon, rhs.neon) } }
+       } else {
+         let s: [u64;2] = cast(self);
+@@ -341,7 +341,7 @@ impl u64x2 {
+         // no unsigned gt so inverting the high bit will get the correct result
+         let highbit = u64x2::splat(1 << 63);
+         Self { sse: cmp_gt_mask_i64_m128i((self ^ highbit).sse, (rhs ^ highbit).sse) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vcgtq_u64(self.neon, rhs.neon) }}
+       } else {
+         // u64x2_gt on WASM is not a thing. https://github.com/WebAssembly/simd/pull/414
+@@ -370,7 +370,7 @@ impl u64x2 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_u64(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u8x16__.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u8x16__.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_wide-0.7.26_src_u8x16__.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,141 @@
+$NetBSD: patch-.._vendor_wide-0.7.26_src_u8x16__.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Do not try to use neon / SIMD in big-endian mode on aarch64.
+
+--- ../vendor/wide-0.7.26/src/u8x16_.rs.orig   2025-02-15 21:44:41.933460113 +0000
++++ ../vendor/wide-0.7.26/src/u8x16_.rs
+@@ -25,7 +25,7 @@ pick! {
+     }
+ 
+     impl Eq for u8x16 { }
+-  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++  } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+     use core::arch::aarch64::*;
+     #[repr(C)]
+     #[derive(Copy, Clone)]
+@@ -70,7 +70,7 @@ impl Add for u8x16 {
+         Self { sse: add_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_add(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vaddq_u8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -106,7 +106,7 @@ impl Sub for u8x16 {
+         Self { sse: sub_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_sub(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vsubq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -178,7 +178,7 @@ impl BitAnd for u8x16 {
+         Self { sse: bitand_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_and(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vandq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -214,7 +214,7 @@ impl BitOr for u8x16 {
+         Self { sse: bitor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_or(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vorrq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -250,7 +250,7 @@ impl BitXor for u8x16 {
+         Self { sse: bitxor_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_xor(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: veorq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -290,7 +290,7 @@ impl u8x16 {
+         Self { sse: cmp_eq_mask_i8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_eq(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vceqq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -322,7 +322,7 @@ impl u8x16 {
+         Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vbslq_u8(self.neon, t.neon, f.neon) }}
+       } else {
+         generic_bit_blend(self, t, f)
+@@ -337,7 +337,7 @@ impl u8x16 {
+         Self { sse: max_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_max(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vmaxq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -369,7 +369,7 @@ impl u8x16 {
+         Self { sse: min_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_min(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vminq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -402,7 +402,7 @@ impl u8x16 {
+         Self { sse: add_saturating_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_add_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe {Self { neon: vqaddq_u8(self.neon, rhs.neon) }}
+       } else {
+         Self { arr: [
+@@ -434,7 +434,7 @@ impl u8x16 {
+         Self { sse: sub_saturating_u8_m128i(self.sse, rhs.sse) }
+       } else if #[cfg(target_feature="simd128")] {
+         Self { simd: u8x16_sub_sat(self.simd, rhs.simd) }
+-      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
++      } else if #[cfg(all(target_feature="neon",target_arch="aarch64",target_endian="little"))]{
+         unsafe { Self { neon: vqsubq_u8(self.neon, rhs.neon) } }
+       } else {
+         Self { arr: [
+@@ -468,7 +468,7 @@ impl u8x16 {
+             u8x16 { sse: unpack_low_i8_m128i(lhs.sse, rhs.sse) }
+         } else if #[cfg(target_feature = "simd128")] {
+           u8x16 { simd: u8x16_shuffle::<0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23>(lhs.simd, rhs.simd) }
+-        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
++        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64",target_endian="little"))] {
+             let lhs = unsafe { vget_low_u8(lhs.neon) };
+             let rhs = unsafe { vget_low_u8(rhs.neon) };
+ 
+@@ -498,7 +498,7 @@ impl u8x16 {
+             u8x16 { sse: unpack_high_i8_m128i(lhs.sse, rhs.sse) }
+         } else if #[cfg(target_feature = "simd128")] {
+             u8x16 { simd: u8x16_shuffle::<8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>(lhs.simd, rhs.simd) }
+-        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
++        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64",target_endian="little"))] {
+             let lhs = unsafe { vget_high_u8(lhs.neon) };
+             let rhs = unsafe { vget_high_u8(rhs.neon) };
+ 
+@@ -528,7 +528,7 @@ impl u8x16 {
+             u8x16 { sse: pack_i16_to_u8_m128i(lhs.sse, rhs.sse) }
+         } else if #[cfg(target_feature = "simd128")] {
+             u8x16 { simd: u8x16_narrow_i16x8(lhs.simd, rhs.simd) }
+-        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
++        } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64",target_endian="little"))] {
+             let lhs = unsafe { vqmovun_s16(lhs.neon) };
+             let rhs = unsafe { vqmovun_s16(rhs.neon) };
+             u8x16 { neon: unsafe { vcombine_u8(lhs, rhs) } }
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_zerocopy-0.7.35_src_lib.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_zerocopy-0.7.35_src_lib.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_zerocopy-0.7.35_src_lib.rs  Sat Feb 15 23:41:47 2025
@@ -0,0 +1,24 @@
+$NetBSD: patch-.._vendor_zerocopy-0.7.35_src_lib.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Attempt at skipping SIMD / neon on big-endian aarch64.
+
+--- ../vendor/zerocopy-0.7.35/src/lib.rs.orig  2025-02-15 21:00:36.601714944 +0000
++++ ../vendor/zerocopy-0.7.35/src/lib.rs
+@@ -3736,7 +3736,7 @@ mod simd {
+             powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
+         );
+         simd_arch_mod!(
+-            #[cfg(target_arch = "aarch64")]
++            #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+             aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
+             int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
+             int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
+@@ -8026,7 +8026,7 @@ mod tests {
+                 vector_signed_long,
+                 vector_unsigned_long
+             );
+-            #[cfg(target_arch = "aarch64")]
++            #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+             #[rustfmt::skip]
+             test_simd_arch_mod!(
+                 aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs        Sat Feb 15 23:41:47 2025
@@ -0,0 +1,33 @@
+$NetBSD: patch-.._vendor_zune-jpeg-0.4.13_src_idct.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/zune-jpeg-0.4.13/src/idct.rs.orig        2025-02-15 21:55:11.774911146 +0000
++++ ../vendor/zune-jpeg-0.4.13/src/idct.rs
+@@ -41,7 +41,7 @@ use crate::idct::scalar::idct_int;
+ 
+ #[cfg(feature = "x86")]
+ pub mod avx2;
+-#[cfg(feature = "neon")]
++#[cfg(all(feature = "neon", target_endian = "little"))]
+ pub mod neon;
+ 
+ pub mod scalar;
+@@ -58,7 +58,7 @@ pub fn choose_idct_func(options: &Decode
+             return crate::idct::avx2::idct_avx2;
+         }
+     }
+-    #[cfg(target_arch = "aarch64")]
++    #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+     #[cfg(feature = "neon")]
+     {
+         if options.use_neon() {
+@@ -129,7 +129,7 @@ mod tests {
+ 
+     fn idct_fnc() -> IDCTPtr {
+         #[cfg(feature = "neon")]
+-        #[cfg(target_arch = "aarch64")]
++        #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
+         {
+             use crate::idct::neon::idct_neon;
+             return idct_neon;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs       Sat Feb 15 23:41:47 2025
@@ -0,0 +1,12 @@
+$NetBSD: patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/zune-jpeg-0.4.13/src/unsafe_utils.rs.orig        2025-02-15 21:57:52.824169081 +0000
++++ ../vendor/zune-jpeg-0.4.13/src/unsafe_utils.rs
+@@ -1,4 +1,4 @@
+ #[cfg(all(feature = "x86", any(target_arch = "x86", target_arch = "x86_64")))]
+ pub use crate::unsafe_utils_avx2::*;
+-#[cfg(all(feature = "neon", target_arch = "aarch64"))]
++#[cfg(all(feature = "neon", target_arch = "aarch64", target_endian = "little"))]
+ pub use crate::unsafe_utils_neon::*;
Index: pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs
diff -u /dev/null pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs:1.1
--- /dev/null   Sat Feb 15 23:41:47 2025
+++ pkgsrc/graphics/librsvg/patches/patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs Sat Feb 15 23:41:47 2025
@@ -0,0 +1,15 @@
+$NetBSD: patch-.._vendor_zune-jpeg-0.4.13_src_unsafe__utils__neon.rs,v 1.1 2025/02/15 23:41:47 he Exp $
+
+Don't try to do neon / SIMD on big-endian aarch64.
+
+--- ../vendor/zune-jpeg-0.4.13/src/unsafe_utils_neon.rs.orig   2025-02-15 22:09:18.837627422 +0000
++++ ../vendor/zune-jpeg-0.4.13/src/unsafe_utils_neon.rs
+@@ -6,7 +6,7 @@
+  * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license
+  */
+ 
+-#![cfg(target_arch = "aarch64")]
++#![cfg(all(target_arch = "aarch64", target_endian = "little"))]
+ // TODO can this be extended to armv7
+ 
+ //! This module provides unsafe ways to do some things
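
For reference, the pattern applied throughout these patches is the same
everywhere: narrow the NEON branches with an additional
target_endian = "little" condition so that big-endian aarch64 (aarch64_eb)
falls through to the portable scalar code instead of the aarch64 intrinsics.
A minimal stand-alone sketch of that idea follows; the function name and the
scalar fallback are illustrative only and are not taken from any of the
patched crates.

    // Sketch of the cfg gating added by these patches: the NEON path is
    // compiled only on little-endian aarch64 with the "neon" feature.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon", target_endian = "little"))]
    fn add_u32x4(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
        use core::arch::aarch64::*;
        unsafe {
            // Load both lanes sets, add element-wise with NEON, store back.
            let r = vaddq_u32(vld1q_u32(a.as_ptr()), vld1q_u32(b.as_ptr()));
            let mut out = [0u32; 4];
            vst1q_u32(out.as_mut_ptr(), r);
            out
        }
    }

    // Everything else, including aarch64_eb, gets the scalar fallback.
    #[cfg(not(all(target_arch = "aarch64", target_feature = "neon", target_endian = "little")))]
    fn add_u32x4(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
        [
            a[0].wrapping_add(b[0]),
            a[1].wrapping_add(b[1]),
            a[2].wrapping_add(b[2]),
            a[3].wrapping_add(b[3]),
        ]
    }

    fn main() {
        // Both code paths produce the same result; only the implementation differs.
        assert_eq!(add_u32x4([1, 2, 3, 4], [10, 20, 30, 40]), [11, 22, 33, 44]);
    }

On aarch64_eb only the scalar definition is compiled, which is what the
amended cfg conditions inside the vendored crates now arrange.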


