@@ -697,8 +697,8 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 	}
 }
 
-static bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
-				   struct io_overflow_cqe *ocqe)
+static __cold bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
+					  struct io_overflow_cqe *ocqe)
 {
 	lockdep_assert_held(&ctx->completion_lock);
 
@@ -813,18 +813,37 @@ static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
 	return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
 }
 
+static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe,
+				   struct io_big_cqe *big_cqe)
+{
+	struct io_overflow_cqe *ocqe;
+
+	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL);
+	spin_lock(&ctx->completion_lock);
+	io_cqring_add_overflow(ctx, ocqe);
+	spin_unlock(&ctx->completion_lock);
+}
+
+static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
+					  struct io_cqe *cqe,
+					  struct io_big_cqe *big_cqe)
+{
+	struct io_overflow_cqe *ocqe;
+
+	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC);
+	return io_cqring_add_overflow(ctx, ocqe);
+}
+
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
 	bool filled;
 
 	io_cq_lock(ctx);
 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
 	if (unlikely(!filled)) {
-		struct io_overflow_cqe *ocqe;
 		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, &cqe, NULL, GFP_ATOMIC);
-		filled = io_cqring_add_overflow(ctx, ocqe);
+		filled = io_cqe_overflow_locked(ctx, &cqe, NULL);
 	}
 	io_cq_unlock_post(ctx);
 	return filled;
@@ -840,13 +859,9 @@ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 	lockdep_assert(ctx->lockless_cq);
 
 	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
-		struct io_overflow_cqe *ocqe;
 		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, &cqe, NULL, GFP_KERNEL);
-		spin_lock(&ctx->completion_lock);
-		io_cqring_add_overflow(ctx, ocqe);
-		spin_unlock(&ctx->completion_lock);
+		io_cqe_overflow(ctx, &cqe, NULL);
 	}
 	ctx->submit_state.cq_flush = true;
 }
@@ -1450,17 +1465,10 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		 */
 		if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
 		    unlikely(!io_fill_cqe_req(ctx, req))) {
-			gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
-			struct io_overflow_cqe *ocqe;
-
-			ocqe = io_alloc_ocqe(ctx, &req->cqe, &req->big_cqe, gfp);
-			if (ctx->lockless_cq) {
-				spin_lock(&ctx->completion_lock);
-				io_cqring_add_overflow(ctx, ocqe);
-				spin_unlock(&ctx->completion_lock);
-			} else {
-				io_cqring_add_overflow(ctx, ocqe);
-			}
+			if (ctx->lockless_cq)
+				io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
+			else
+				io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe);
 		}
 	}
 	__io_cq_unlock_post(ctx);
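The patch above collapses three copies of the same alloc-then-queue sequence into a pair of helpers split by locking context: io_cqe_overflow() is called without completion_lock held, so it can allocate with GFP_KERNEL and take the lock itself, while io_cqe_overflow_locked() runs with completion_lock already held and must therefore allocate with GFP_ATOMIC. Below is a minimal userspace sketch of the same helper-pair pattern, not kernel code: the names (struct queue, queue_add, queue_add_unlocked, queue_add_locked) are invented for illustration, and a pthread mutex plus plain malloc() stand in for the kernel's spinlock and GFP-flagged allocator.

/*
 * Standalone illustration of the "unlocked vs _locked helper" pattern.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int value;
	struct entry *next;
};

struct queue {
	pthread_mutex_t lock;
	struct entry *head;
};

/* Core add; like io_cqring_add_overflow(), the caller must hold the lock. */
static int queue_add(struct queue *q, struct entry *e)
{
	if (!e)
		return 0;	/* allocation failed upstream; nothing queued */
	e->next = q->head;
	q->head = e;
	return 1;
}

/* Analogue of io_cqe_overflow(): lock not held, allocation may block. */
static void queue_add_unlocked(struct queue *q, int value)
{
	struct entry *e = malloc(sizeof(*e));	/* GFP_KERNEL-like context */

	if (e)
		e->value = value;
	pthread_mutex_lock(&q->lock);
	queue_add(q, e);
	pthread_mutex_unlock(&q->lock);
}

/* Analogue of io_cqe_overflow_locked(): caller already holds q->lock,
 * so the allocation must not sleep (GFP_ATOMIC in the kernel). */
static int queue_add_locked(struct queue *q, int value)
{
	struct entry *e = malloc(sizeof(*e));

	if (e)
		e->value = value;
	return queue_add(q, e);
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };

	queue_add_unlocked(&q, 1);

	pthread_mutex_lock(&q.lock);
	queue_add_locked(&q, 2);
	pthread_mutex_unlock(&q.lock);

	for (struct entry *e = q.head; e; e = e->next)
		printf("%d\n", e->value);
	return 0;
}

The payoff in the patch is visible in __io_submit_flush_completions(): the old open-coded version had to pick a gfp_t up front and then branch on ctx->lockless_cq around the locking, whereas the new version expresses the same two cases as a single two-way call.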