path: root/contrib/libs/liburing/test/init-mem.c
#include "../config-host.h"
/* SPDX-License-Identifier: MIT */
/*
 * Description: Check that io_uring_queue_init_mem() doesn't underestimate
 *		the memory required for rings of various sizes.
 */
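/*
 * io_uring_queue_init_mem() builds the rings inside caller-provided memory
 * and returns the number of bytes of that buffer it consumed (or a negative
 * errno). This test brackets the provided region with redzone words and
 * checks that they survive a full submit/complete workload, which would
 * fail if the returned size were smaller than what the ring actually uses.
 */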
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/udp.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <error.h>

#include "liburing.h"
#include "helpers.h"

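/*
 * 64-bit poison patterns placed immediately before the ring memory and
 * immediately after the bytes the ring reports consuming; any change to
 * either value indicates an out-of-bounds write.
 */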
#define PRE_RED	0x5aa55aa55aa55aa5ULL
#define POST_RED 0xa55aa55aa55aa55aULL

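/* Per-test state: the ring, its backing allocation, and the two redzone words. */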
struct ctx {
	struct io_uring		ring;
	void			*ring_mem;
	void			*mem;
	unsigned long long	*pre;
	unsigned long long	*post;
};

struct q_entries {
	unsigned int sqes;
	unsigned int cqes;
};

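/*
 * Lay out a 2MB buffer as one leading 4KB page, ending in the pre redzone,
 * followed by the memory handed to io_uring_queue_init_mem(). On success,
 * the post redzone is planted right after the bytes the ring reported using.
 * Returns T_EXIT_PASS, T_EXIT_SKIP if the kernel rejects the setup with
 * -EINVAL, or T_EXIT_FAIL on any other error.
 */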
static int setup_ctx(struct ctx *ctx, struct q_entries *q)
{
	struct io_uring_params p = { };
	int ret;

	if (posix_memalign(&ctx->mem, 4096, 2*1024*1024))
		return T_EXIT_FAIL;

	ctx->pre = ctx->mem + 4096 - sizeof(unsigned long long);
	*ctx->pre = PRE_RED;

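	/* hand everything past the first page to the ring */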
	ctx->ring_mem = ctx->mem + 4096;
	p.flags |= IORING_SETUP_CQSIZE | IORING_SETUP_NO_SQARRAY;
	p.sq_entries = q->sqes;
	p.cq_entries = q->cqes;

	ret = io_uring_queue_init_mem(q->sqes, &ctx->ring, &p,
					ctx->ring_mem, 2*1024*1024 - 4096);

	if (ret < 0) {
		if (ret == -EINVAL)
			return T_EXIT_SKIP;
		fprintf(stderr, "queue init: %d\n", ret);
		return T_EXIT_FAIL;
	}

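	/* ret is the number of bytes the ring consumed from ring_mem */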
	ctx->post = ctx->ring_mem + ret;
	*ctx->post = POST_RED;
	return T_EXIT_PASS;
}

static void clean_ctx(struct ctx *ctx)
{
	io_uring_queue_exit(&ctx->ring);
	/* release the backing buffer allocated in setup_ctx() */
	free(ctx->mem);
}

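/* Verify both redzone words are still intact; returns non-zero on corruption. */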
static int check_red(struct ctx *ctx, unsigned long i)
{
	int fail = 0;

	if (*ctx->pre != PRE_RED) {
		printf("pre redzone=%llx at i=%lu\n", *ctx->pre, i);
		fail = 1;
	}
	if (*ctx->post != POST_RED) {
		printf("post redzone=%llx at i=%lu\n", *ctx->post, i);
		fail = 1;
	}
	return fail;
}

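/*
 * Drive the ring with batches of NOPs until twice the CQ size has been
 * submitted, re-checking the redzones before each batch and verifying that
 * completions arrive with sequential user_data values.
 */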
static int test(struct q_entries *q)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct ctx ctx = { };
	unsigned long i, ud;
	int j, ret, batch;

	ret = setup_ctx(&ctx, q);
	if (ret == T_EXIT_SKIP)
		return T_EXIT_SKIP;
	else if (ret != T_EXIT_PASS)
		return ret;

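	/* cap the batch at the SQ size so io_uring_get_sqe() never runs dry */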
	batch = 64;
	if (batch > q->sqes)
		batch = q->sqes;

	i = ud = 0;
	while (i < q->cqes * 2) {
		if (check_red(&ctx, i))
			return T_EXIT_FAIL;
		for (j = 0; j < batch; j++) {
			sqe = io_uring_get_sqe(&ctx.ring);
			io_uring_prep_nop(sqe);
			sqe->user_data = j + (unsigned long) i;
		}
		io_uring_submit(&ctx.ring);
		for (j = 0; j < batch; j++) {
			ret = io_uring_wait_cqe(&ctx.ring, &cqe);
			if (ret)
				goto err;
			if (cqe->user_data != ud) {
				fprintf(stderr, "ud=%lu, wanted %lu\n", (unsigned long) cqe->user_data, ud);
				goto err;
			}
			ud++;
			io_uring_cqe_seen(&ctx.ring, cqe);
		}
		i += batch;
	}

	clean_ctx(&ctx);
	return T_EXIT_PASS;
err:
	clean_ctx(&ctx);
	return T_EXIT_FAIL;
}

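/*
 * Run the redzone check over several SQ/CQ size combinations, skipping the
 * whole test if the kernel doesn't support rings in user-provided memory.
 */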
int main(int argc, char *argv[])
{
	struct q_entries q_entries[] = {
		{ 256, 16384 },
		{ 32, 4096 },
		{ 128, 8192 },
		{ 4096, 32768 },
		{ 1, 8 },
		{ 2, 1024 },
	};
	int i, ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	for (i = 0; i < ARRAY_SIZE(q_entries); i++) {
		ret = test(&q_entries[i]);
		if (ret == T_EXIT_SKIP) {
			return T_EXIT_SKIP;
		} else if (ret != T_EXIT_PASS) {
			fprintf(stderr, "Failed at %u/%u\n", q_entries[i].sqes,
							q_entries[i].cqes);
			return T_EXIT_FAIL;
		}
	}

	return T_EXIT_PASS;
}