| author | David Howells <dhowells@redhat.com> | 2024-12-04 07:47:01 +0000 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2024-12-09 13:48:31 -0800 |
| commit | a2ea9a9072607c2fd6442bd1ffb4dbdbf882aed7 (patch) | |
| tree | 06b5cb2f466316d1479762cd903a7e2a055c8c10 /net/rxrpc/io_thread.c | |
| parent | 08d55d7cf3f33c730ce2694393efe16b7983a9c8 (diff) | |
rxrpc: Use irq-disabling spinlocks between app and I/O thread
Where a spinlock is used by both the application thread and the I/O thread,
use irq-disabling locking so that an interrupt taken on the app thread
doesn't also slow down the I/O thread.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
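The rationale can be pictured with a minimal sketch (illustrative only, not taken from this patch; struct my_local and the two helpers below are hypothetical stand-ins for the rxrpc structures). The _bh lock variants only disable softirqs, so a hard interrupt landing on the app thread's CPU while it holds the lock can stretch the critical section and leave the I/O thread spinning; the irq-disabling variants close that window. A caller whose interrupt state is unknown uses the irqsave/irqrestore form, while the I/O thread, which runs in process context with interrupts enabled, can use the plain _irq form:

```c
/*
 * Illustrative sketch only: a work queue shared between application-thread
 * context and a dedicated I/O kthread.  my_local, my_queue_work() and
 * my_io_thread_collect() are hypothetical names, not rxrpc code.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_local {
	spinlock_t		lock;		/* protects attend_q */
	struct list_head	attend_q;	/* work awaiting the I/O thread */
};

/* App side: interrupt state unknown, so save and restore it. */
static void my_queue_work(struct my_local *local, struct list_head *work)
{
	unsigned long flags;

	spin_lock_irqsave(&local->lock, flags);
	list_add_tail(work, &local->attend_q);
	spin_unlock_irqrestore(&local->lock, flags);
}

/* I/O thread side: process context with interrupts enabled, so plain _irq. */
static void my_io_thread_collect(struct my_local *local, struct list_head *batch)
{
	spin_lock_irq(&local->lock);
	list_splice_tail_init(&local->attend_q, batch);
	spin_unlock_irq(&local->lock);
}
```
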
Diffstat (limited to 'net/rxrpc/io_thread.c')
| -rw-r--r-- | net/rxrpc/io_thread.c | 8 |

1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index bc678a299bd8..fbacf2056f64 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -500,9 +500,9 @@ int rxrpc_io_thread(void *data)
 		}
 
 		/* Deal with connections that want immediate attention. */
-		spin_lock_bh(&local->lock);
+		spin_lock_irq(&local->lock);
 		list_splice_tail_init(&local->conn_attend_q, &conn_attend_q);
-		spin_unlock_bh(&local->lock);
+		spin_unlock_irq(&local->lock);
 
 		while ((conn = list_first_entry_or_null(&conn_attend_q,
 							struct rxrpc_connection,
@@ -519,9 +519,9 @@ int rxrpc_io_thread(void *data)
 			rxrpc_discard_expired_client_conns(local);
 
 		/* Deal with calls that want immediate attention. */
-		spin_lock_bh(&local->lock);
+		spin_lock_irq(&local->lock);
 		list_splice_tail_init(&local->call_attend_q, &call_attend_q);
-		spin_unlock_bh(&local->lock);
+		spin_unlock_irq(&local->lock);
 
 		while ((call = list_first_entry_or_null(&call_attend_q,
 							struct rxrpc_call,
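A note on the surrounding pattern visible in both hunks: list_splice_tail_init() moves the whole pending queue onto a local list in constant time, so the irq-disabled critical section stays very short; the I/O thread then walks the spliced-off entries in the while loops with the lock dropped and interrupts re-enabled.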