author		Daniel Borkmann <daniel@iogearbox.net>	2018-02-06 11:39:32 +0100
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-02-06 11:39:33 +0100
commit		41b0530eca69740f0c69eb5a867c41b01ec2e551 (patch)
tree		07ee2d1dc8963140d2e718a8600112892c8add3f /net/ipv4/tcp_ulp.c
parent		7b4eb53d95c30bdc55c44647c6968f24227fd1ba (diff)
parent		3d9e952697de89b53227f06d4241f275eb99cfc4 (diff)
Merge branch 'bpf-sockmap-fixes'
John Fastabend says:
====================
A set of fixes for sockmap covering programs that reference a sockmap and
then close without deleting all entries in the map and/or without detaching
the BPF programs attached to the map. Both leaving entries in the map and
leaving programs attached may result in the map never being removed by the
BPF infrastructure, because its reference counts never reach zero.
For this we pull in the ULP infrastructure to hook into the close() path
of the sock layer. This seemed natural because additional sockmap features
(adding support for TX hooks) will also use the ULP infrastructure. It lets
us clean up entries in the map when a sock is closed and avoids trying to
get the sk_state_change() hook to fire in all cases; a rough sketch of the
pattern follows after this message.
The second issue resolved here occurs when users don't detach programs.
The gist is a refcount issue, resolved by implementing the release callback;
see the patch for details, and the sketch after the diffstat below for the
general idea.
For testing I ran both samples/sockmap and the selftests in bpf/test_maps.c.
Dave Watson ran the TLS test suite on the v1 version of the patches, without
the module_put error path change.
v4: fix missing rcu_unlock()
v3: wrap psock reference in RCU
v2: changes rebased onto bpf-next with small update adding module_put
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
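As context for the close() hook described in the cover letter, the rough shape of the pattern is a non-user-visible ULP whose init() swaps the socket's proto ops so that close() can tear down sockmap state before the original close() runs. The sketch below is illustrative only: the helper names (sockmap_ulp_init(), sockmap_sk_close()), the "bpf_sockmap" name, and the single static proto copy are assumptions for the example, not code from the series.

/* Illustrative sketch only: a non user-visible ULP whose init() overrides
 * the socket's close() so sockmap entries can be cleaned up before the
 * original close() runs. Helper names and the single proto copy are
 * simplifications, not the actual sockmap implementation.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct proto bpf_tcp_proto;	/* copy of the sk's proto, close overridden */
static void (*saved_close)(struct sock *sk, long timeout);

static void sockmap_sk_close(struct sock *sk, long timeout)
{
	/* Drop this socket from any sockmap entries it still occupies so the
	 * map's reference count can reach zero, then run the real close().
	 */
	/* ... psock/map entry cleanup elided ... */
	saved_close(sk, timeout);
}

static int sockmap_ulp_init(struct sock *sk)
{
	saved_close = sk->sk_prot->close;
	bpf_tcp_proto = *sk->sk_prot;
	bpf_tcp_proto.close = sockmap_sk_close;
	sk->sk_prot = &bpf_tcp_proto;
	return 0;
}

static struct tcp_ulp_ops sockmap_ulp_ops __read_mostly = {
	.name		= "bpf_sockmap",	/* hypothetical name */
	.uid		= TCP_ULP_BPF,		/* assigned by id, not by name */
	.user_visible	= false,		/* not selectable via setsockopt(TCP_ULP) */
	.owner		= THIS_MODULE,
	.init		= sockmap_ulp_init,
};

static int sockmap_ulp_register(void)
{
	return tcp_register_ulp(&sockmap_ulp_ops);
}

Marking the ops user_visible = false is what keeps userspace from selecting this ULP through the TCP_ULP socket option, while the id-based kernel path (tcp_set_ulp_id() in the diff below) can still attach it.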
Diffstat (limited to 'net/ipv4/tcp_ulp.c')
-rw-r--r--	net/ipv4/tcp_ulp.c | 59
1 file changed, 54 insertions(+), 5 deletions(-)
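The diffstat above is limited to net/ipv4/tcp_ulp.c; the release-callback fix mentioned in the cover letter lives on the sockmap side, which is not shown here. As a hedged illustration of the idea only (the struct sock_map_stab container, its field names, and sock_map_sketch_release() are hypothetical, not the series' code): when the map's file goes away, any still-attached parse/verdict programs must drop their references so neither the programs nor the map stay pinned.

/* Hedged sketch of the refcount fix described in the cover letter: when the
 * sockmap's file is released, put any BPF programs still attached to it.
 * Struct and field names here are hypothetical; the real fix is in the
 * sockmap implementation, wired up as the map's release callback.
 */
#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/fs.h>

struct sock_map_stab {			/* hypothetical sockmap container */
	struct bpf_map map;
	struct bpf_prog *parse_prog;	/* attached sk_skb parser program */
	struct bpf_prog *verdict_prog;	/* attached sk_skb verdict program */
};

static void sock_map_sketch_release(struct bpf_map *map, struct file *map_file)
{
	struct sock_map_stab *stab = container_of(map, struct sock_map_stab, map);
	struct bpf_prog *prog;

	/* Detach-on-release: drop the program references the map still holds,
	 * otherwise the map and the programs keep each other alive forever.
	 */
	prog = xchg(&stab->parse_prog, NULL);
	if (prog)
		bpf_prog_put(prog);
	prog = xchg(&stab->verdict_prog, NULL);
	if (prog)
		bpf_prog_put(prog);
}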
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 6bb9e14c710a..622caa4039e0 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -29,6 +29,18 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
 	return NULL;
 }
 
+static struct tcp_ulp_ops *tcp_ulp_find_id(const int ulp)
+{
+	struct tcp_ulp_ops *e;
+
+	list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
+		if (e->uid == ulp)
+			return e;
+	}
+
+	return NULL;
+}
+
 static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
 {
 	const struct tcp_ulp_ops *ulp = NULL;
@@ -51,6 +63,18 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
 	return ulp;
 }
 
+static const struct tcp_ulp_ops *__tcp_ulp_lookup(const int uid)
+{
+	const struct tcp_ulp_ops *ulp;
+
+	rcu_read_lock();
+	ulp = tcp_ulp_find_id(uid);
+	if (!ulp || !try_module_get(ulp->owner))
+		ulp = NULL;
+	rcu_read_unlock();
+	return ulp;
+}
+
 /* Attach new upper layer protocol to the list
  * of available protocols.
  */
@@ -59,13 +83,10 @@ int tcp_register_ulp(struct tcp_ulp_ops *ulp)
 	int ret = 0;
 
 	spin_lock(&tcp_ulp_list_lock);
-	if (tcp_ulp_find(ulp->name)) {
-		pr_notice("%s already registered or non-unique name\n",
-			  ulp->name);
+	if (tcp_ulp_find(ulp->name))
 		ret = -EEXIST;
-	} else {
+	else
 		list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
-	}
 	spin_unlock(&tcp_ulp_list_lock);
 
 	return ret;
@@ -124,6 +145,34 @@ int tcp_set_ulp(struct sock *sk, const char *name)
 	if (!ulp_ops)
 		return -ENOENT;
 
+	if (!ulp_ops->user_visible) {
+		module_put(ulp_ops->owner);
+		return -ENOENT;
+	}
+
+	err = ulp_ops->init(sk);
+	if (err) {
+		module_put(ulp_ops->owner);
+		return err;
+	}
+
+	icsk->icsk_ulp_ops = ulp_ops;
+	return 0;
+}
+
+int tcp_set_ulp_id(struct sock *sk, int ulp)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_ulp_ops *ulp_ops;
+	int err;
+
+	if (icsk->icsk_ulp_ops)
+		return -EEXIST;
+
+	ulp_ops = __tcp_ulp_lookup(ulp);
+	if (!ulp_ops)
+		return -ENOENT;
+
 	err = ulp_ops->init(sk);
 	if (err) {
 		module_put(ulp_ops->owner);
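To show how the new id-based entry point might be used: the name-based tcp_set_ulp() now refuses ULPs that are not user_visible, while in-kernel callers can attach by id through tcp_set_ulp_id(). The snippet below is a sketch only; sockmap_attach_ulp() and the locking around the call are assumptions, and TCP_ULP_BPF is the id this series reserves for the sockmap ULP.

/* Sketch of an in-kernel caller using the id-based path added above.
 * sockmap_attach_ulp() is a hypothetical wrapper; tcp_set_ulp_id() looks the
 * ops up by uid (no user_visible check), takes a module reference and runs
 * ->init(), mirroring the setsockopt(TCP_ULP) path.
 */
#include <net/tcp.h>

static int sockmap_attach_ulp(struct sock *sk)
{
	int err;

	lock_sock(sk);
	err = tcp_set_ulp_id(sk, TCP_ULP_BPF);	/* -EEXIST if a ULP is already set */
	release_sock(sk);

	return err;
}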