@@ -39,9 +39,10 @@ type Buffer struct {
3939
4040 // goldfishClaimed tracks whether a goldfish address space region
4141 // was claimed, so Munmap can unclaim it.
42- goldfishClaimed bool
43- goldfishClaimedFD int
44- goldfishOffset uint64
42+ goldfishClaimed bool
43+ goldfishClaimedFD int
44+ goldfishClaimedOffset uint64 // page-aligned offset passed to claimShared
45+ goldfishOffset uint64 // raw buffer offset within the address space
4546}
4647
4748// BufferSize returns the buffer size in bytes based on dimensions and
@@ -190,37 +191,43 @@ func (b *Buffer) mmapGoldfish(fd int, bufSize int) error {
190191 }
191192 }
192193
194+ // Page-align the offset for mmap.
195+ pageSize := uint64 (unix .Getpagesize ())
196+ pageOffset := offset & ^ (pageSize - 1 )
197+ inPageOffset := offset - pageOffset
198+ mapLen := allocSize + inPageOffset
199+
200+ // The kernel page-aligns the mmap size via __PAGE_ALIGN, so the
201+ // claimed region must cover the full page-aligned extent. Otherwise
202+ // as_blocks_check_if_mine rejects the mmap with EPERM because the
203+ // page-aligned end exceeds the raw claim end.
204+ claimSize := (mapLen + pageSize - 1 ) & ^ (pageSize - 1 )
205+
193206 // Claim the shared region so the kernel allows mmap.
194- if err := goldfishClaimShared (fd , offset , allocSize ); err != nil {
195- return fmt .Errorf ("goldfish claimShared offset=0x%x size=%d: %w" , offset , allocSize , err )
207+ if err := goldfishClaimShared (fd , pageOffset , claimSize ); err != nil {
208+ return fmt .Errorf ("goldfish claimShared offset=0x%x size=%d: %w" , pageOffset , claimSize , err )
196209 }
197210 b .goldfishClaimed = true
198211 b .goldfishClaimedFD = fd
212+ b .goldfishClaimedOffset = pageOffset
199213 b .goldfishOffset = offset
200214
201- // Page-align the offset for mmap.
202- pageSize := int64 (unix .Getpagesize ())
203- pageOffset := int64 (offset ) & ^ (pageSize - 1 )
204- inPageOffset := int64 (offset ) - pageOffset
205- mapLen := int (int64 (allocSize ) + inPageOffset )
206-
207215 // Try mmap strategies at the goldfish offset.
216+ var lastErr error
208217 for _ , strategy := range mmapStrategies {
209- data , err := unix .Mmap (fd , pageOffset , mapLen , strategy .prot , strategy .flags )
218+ data , err := unix .Mmap (fd , int64 ( pageOffset ), int ( mapLen ) , strategy .prot , strategy .flags )
210219 if err == nil {
211220 // Save the full mmap region for proper munmap later, and
212221 // expose only the buffer data sub-slice via MmapData.
213222 b .mmapFull = data
214- b .MmapData = data [inPageOffset : inPageOffset + int64 (bufSize )]
223+ b .MmapData = data [inPageOffset : inPageOffset + uint64 (bufSize )]
215224 return nil
216225 }
226+ lastErr = err
217227 }
218228
219- // Mmap failed but the region is claimed. Keep the claim active so
220- // ReadPixels can use pread at the goldfish offset as a fallback.
221- // This is common on kernels where the goldfish address space driver
222- // denies mmap from userspace (EPERM) but allows read/pread.
223- return nil
229+ return fmt .Errorf ("goldfish mmap fd=%d pageOffset=0x%x mapLen=%d claimOffset=0x%x claimSize=%d allocSize=%d: %w" ,
230+ fd , pageOffset , mapLen , pageOffset , claimSize , allocSize , lastErr )
224231}
225232
226233// ReadPixels returns the buffer pixel data.
@@ -249,10 +256,20 @@ func (b *Buffer) ReadPixels(ctx context.Context) ([]byte, error) {
249256 if lockErr == nil {
250257 return pixels , nil
251258 }
252- logger .Debugf (ctx , "IMapper lock failed: %v; trying direct pread " , lockErr )
259+ logger .Debugf (ctx , "IMapper lock failed: %v" , lockErr )
253260 } else {
254- logger .Debugf (ctx , "IMapper unavailable: %v; trying direct pread" , err )
261+ logger .Debugf (ctx , "IMapper unavailable: %v" , err )
262+ }
263+
264+ // The IMapper lock is a cross-process call that may fail (e.g.,
265+ // the passthrough mapper cannot be accessed via hwbinder). If the
266+ // buffer is mmap'd, the camera HAL has already rendered frame
267+ // data into the goldfish shared memory via the host GPU before
268+ // queueing the buffer back. Read directly from the mmap.
269+ if b .MmapData != nil {
270+ return copyFromMMIO (b .MmapData ), nil
255271 }
272+
256273 return b .preadGoldfish (ctx )
257274 }
258275
@@ -296,6 +313,33 @@ func (b *Buffer) preadGoldfish(ctx context.Context) ([]byte, error) {
296313 return out [:n ], nil
297314}
298315
// copyFromMMIO copies data out of a goldfish address space mmap region.
// The goldfish address space is backed by a PCI BAR (MMIO), which does
// not support vectorized reads (AVX2/SSE). Go's runtime.memmove (used
// by copy()) uses VMOVDQU which causes SIGILL on MMIO memory. Instead,
// read the region with plain uint64 loads through unsafe pointers,
// which the compiler lowers to scalar MOV instructions.
//
//go:noinline
func copyFromMMIO(src []byte) []byte {
	total := len(src)
	out := make([]byte, total)

	// Bulk phase: 8-byte scalar loads via unsafe pointer arithmetic.
	// The compiler emits plain MOVs (not vector ops) for these reads.
	off := 0
	for off+8 <= total {
		word := *(*uint64)(unsafe.Pointer(&src[off]))
		*(*uint64)(unsafe.Pointer(&out[off])) = word
		off += 8
	}
	// Tail phase: move the remaining 0-7 bytes one at a time.
	for off < total {
		out[off] = src[off]
		off++
	}

	return out
}
342+
299343// isGoldfishFD checks if an FD points to the goldfish emulator's
300344// address space device.
301345func isGoldfishFD (fd int ) bool {
@@ -389,7 +433,7 @@ func (b *Buffer) Munmap() {
389433 b .dmaBufSynced = false
390434 }
391435 if b .goldfishClaimed {
392- goldfishUnclaimShared (b .goldfishClaimedFD , b .goldfishOffset )
436+ goldfishUnclaimShared (b .goldfishClaimedFD , b .goldfishClaimedOffset )
393437 b .goldfishClaimed = false
394438 }
395439}
0 commit comments