/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __TARGET_CORE_USER_H
#define __TARGET_CORE_USER_H
/* This header is also used by userspace applications */
#include <linux/types.h>
#include <linux/uio.h>
#define TCMU_VERSION "2.0"
/**
 * DOC: Ring Design
 * Ring Design
 * -----------
 *
 * The mmap()ed area is divided into three parts:
 * 1) The mailbox (struct tcmu_mailbox, below);
 * 2) The command ring;
 * 3) Everything beyond the command ring (data).
 *
 * The mailbox tells userspace the offset of the command ring from the
 * start of the shared memory region, and how big the command ring is.
 *
 * The kernel passes SCSI commands to userspace by putting a struct
 * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
 * userspace via UIO's interrupt mechanism.
 *
 * tcmu_cmd_entry contains a header. If the header type is PAD,
 * userspace should skip hdr->length bytes (mod cmdr_size) to find the
 * next cmd_entry.
 *
 * Otherwise, the entry will contain offsets into the mmap()ed area
 * where the cdb and data buffers live -- the latter accessible via the
 * iov array. iov addresses are also offsets into the shared area.
 *
 * When userspace has completed handling the command, it should set
 * entry->rsp.scsi_status, fill in rsp.sense_buffer if appropriate,
 * and set mailbox->cmd_tail equal to the old cmd_tail plus
 * hdr->length, mod cmdr_size. If cmd_tail still does not equal
 * cmd_head, it should process the next entry the same way, and so on.
 */
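
/*
 * Illustrative sketch, not part of the ABI: the "poking" mentioned above is
 * the standard UIO mechanism.  A blocking read() of a 4-byte counter on the
 * /dev/uioN file descriptor waits for the kernel's notification, and writing
 * a 4-byte value back invokes the driver's irqcontrol hook so the kernel
 * reaps completions.  The helper names and the example-only include are
 * assumptions for illustration, not a mandated API.
 */
#include <unistd.h>	/* example-only: read()/write() on the UIO fd */

/* Block until the kernel signals that new entries were placed in the ring. */
static __inline__ int tcmu_example_wait_for_event(int uio_fd)
{
	__u32 event_count;

	/* UIO hands back a 4-byte interrupt counter; its value is not needed here. */
	if (read(uio_fd, &event_count, sizeof(event_count)) != sizeof(event_count))
		return -1;
	return 0;
}

/* Tell the kernel that cmd_tail moved and completions are ready to reap. */
static __inline__ int tcmu_example_kick_kernel(int uio_fd)
{
	__u32 val = 0;	/* the written value is ignored by TCMU's irqcontrol handler */

	if (write(uio_fd, &val, sizeof(val)) != sizeof(val))
		return -1;
	return 0;
}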
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1 << 3) /* Keep buf after cmd completion */
struct tcmu_mailbox {
	__u16 version;
	__u16 flags;
	__u32 cmdr_off;
	__u32 cmdr_size;

	__u32 cmd_head;

	/* Updated by user. On its own cacheline */
	__u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));
} __attribute__((packed));
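
/*
 * Illustrative sketch, not part of the ABI: before touching the ring, a
 * userspace handler should verify the mailbox version and note which optional
 * capabilities the kernel advertises in mb->flags.  The function name is an
 * assumption for illustration; it relies only on the layout above.
 */
static __inline__ int tcmu_example_check_mailbox(struct tcmu_mailbox *mb,
						 int *has_read_len, int *has_tmr)
{
	/* Refuse to run against a mailbox layout we do not understand. */
	if (mb->version != TCMU_MAILBOX_VERSION)
		return -1;

	/* Optional capabilities are announced via the flags field. */
	*has_read_len = !!(mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN);
	*has_tmr = !!(mb->flags & TCMU_MAILBOX_FLAG_CAP_TMR);

	/*
	 * The command ring starts mb->cmdr_off bytes into the mmap()ed region
	 * and is mb->cmdr_size bytes long; everything past it is data space.
	 */
	return 0;
}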
enum tcmu_opcode {
	TCMU_OP_PAD = 0,
	TCMU_OP_CMD,
	TCMU_OP_TMR,
};
/*
* Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
*/
struct tcmu_cmd_entry_hdr {
	__u32 len_op;
	__u16 cmd_id;
	__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
#define TCMU_UFLAG_READ_LEN 0x2
#define TCMU_UFLAG_KEEP_BUF 0x4
	__u8 uflags;
} __attribute__((packed));
#define TCMU_OP_MASK 0x7
static __inline__ enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
	return len_op & TCMU_OP_MASK;
}

static __inline__ void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
	*len_op &= ~TCMU_OP_MASK;
	*len_op |= (op & TCMU_OP_MASK);
}

static __inline__ __u32 tcmu_hdr_get_len(__u32 len_op)
{
	return len_op & ~TCMU_OP_MASK;
}

static __inline__ void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
	*len_op &= TCMU_OP_MASK;
	*len_op |= len;
}
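
/*
 * Illustrative sketch, not part of the ABI: whatever the opcode, the consumer
 * advances through the ring by the length encoded in len_op, wrapping modulo
 * cmdr_size as described in the Ring Design comment.  The helper name is an
 * assumption for illustration.
 */
static __inline__ void tcmu_example_advance_tail(struct tcmu_mailbox *mb,
						 __u32 len_op)
{
	mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(len_op)) % mb->cmdr_size;
}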
/* Currently the same as SCSI_SENSE_BUFFERSIZE */
#define TCMU_SENSE_BUFFERSIZE 96
struct tcmu_cmd_entry {
	struct tcmu_cmd_entry_hdr hdr;

	union {
		struct {
			__u32 iov_cnt;
			__u32 iov_bidi_cnt;
			__u32 iov_dif_cnt;
			__u64 cdb_off;
			__u64 __pad1;
			__u64 __pad2;
			__DECLARE_FLEX_ARRAY(struct iovec, iov);
		} req;
		struct {
			__u8 scsi_status;
			__u8 __pad1;
			__u16 __pad2;
			__u32 read_len;
			char sense_buffer[TCMU_SENSE_BUFFERSIZE];
		} rsp;
	};
} __attribute__((packed));
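
/*
 * Illustrative sketch, not part of the ABI: a minimal consumer loop for the
 * command ring, following the Ring Design description above.  PAD entries are
 * skipped, command entries are handed to a caller-supplied callback that fills
 * in ent->rsp, and TMR entries are ignored here (see the TMR sketch below).
 * The function and callback names are assumptions for illustration, and the
 * memory barriers a real consumer needs around cmd_head/cmd_tail are omitted.
 */
typedef void (*tcmu_example_cmd_fn)(void *mmap_base, struct tcmu_cmd_entry *ent);

static __inline__ void tcmu_example_process_ring(void *mmap_base,
						 tcmu_example_cmd_fn handle_cmd)
{
	struct tcmu_mailbox *mb = (struct tcmu_mailbox *)mmap_base;
	char *cmdr = (char *)mmap_base + mb->cmdr_off;

	while (mb->cmd_tail != mb->cmd_head) {
		struct tcmu_cmd_entry *ent =
			(struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);
		__u32 len_op = ent->hdr.len_op;

		if (tcmu_hdr_get_op(len_op) == TCMU_OP_CMD) {
			/*
			 * The CDB lives req.cdb_off bytes from the start of the
			 * shared region and req.iov[] holds data-buffer offsets.
			 * The callback is expected to set ent->rsp.scsi_status
			 * (and sense_buffer on error) before returning.
			 */
			handle_cmd(mmap_base, ent);
		}
		/* PAD (and, in this sketch, TMR) entries are simply skipped. */

		tcmu_example_advance_tail(mb, len_op);
	}
	/* Finish by kicking the kernel with a 4-byte write() to the uio fd. */
}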
struct tcmu_tmr_entry {
	struct tcmu_cmd_entry_hdr hdr;

#define TCMU_TMR_UNKNOWN		0
#define TCMU_TMR_ABORT_TASK		1
#define TCMU_TMR_ABORT_TASK_SET		2
#define TCMU_TMR_CLEAR_ACA		3
#define TCMU_TMR_CLEAR_TASK_SET		4
#define TCMU_TMR_LUN_RESET		5
#define TCMU_TMR_TARGET_WARM_RESET	6
#define TCMU_TMR_TARGET_COLD_RESET	7
/* Pseudo reset due to received PR OUT */
#define TCMU_TMR_LUN_RESET_PRO		128
	__u8 tmr_type;

	__u8 __pad1;
	__u16 __pad2;
	__u32 cmd_cnt;
	__u64 __pad3;
	__u64 __pad4;
	__u16 cmd_ids[];
} __attribute__((packed));
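
/*
 * Illustrative sketch, not part of the ABI: when TMR notifications are enabled
 * (see TCMU_MAILBOX_FLAG_CAP_TMR), tcmu_tmr_entry records appear in the same
 * command ring and are consumed like any other entry, i.e. by advancing
 * cmd_tail by the header length.  cmd_ids[] lists the IDs of commands affected
 * by the task management request.  The callback and function names are
 * assumptions for illustration.
 */
typedef void (*tcmu_example_tmr_fn)(__u8 tmr_type, __u16 cmd_id);

static __inline__ void tcmu_example_handle_tmr(struct tcmu_tmr_entry *tmr,
					       tcmu_example_tmr_fn abort_cmd)
{
	__u32 i;

	/* tmr_type is e.g. TCMU_TMR_ABORT_TASK or TCMU_TMR_LUN_RESET. */
	for (i = 0; i < tmr->cmd_cnt; i++)
		abort_cmd(tmr->tmr_type, tmr->cmd_ids[i]);
}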
#define TCMU_OP_ALIGN_SIZE sizeof(__u64)
enum tcmu_genl_cmd {
	TCMU_CMD_UNSPEC,
	TCMU_CMD_ADDED_DEVICE,
	TCMU_CMD_REMOVED_DEVICE,
	TCMU_CMD_RECONFIG_DEVICE,
	TCMU_CMD_ADDED_DEVICE_DONE,
	TCMU_CMD_REMOVED_DEVICE_DONE,
	TCMU_CMD_RECONFIG_DEVICE_DONE,
	TCMU_CMD_SET_FEATURES,
	__TCMU_CMD_MAX,
};
#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
enum tcmu_genl_attr {
	TCMU_ATTR_UNSPEC,
	TCMU_ATTR_DEVICE,
	TCMU_ATTR_MINOR,
	TCMU_ATTR_PAD,
	TCMU_ATTR_DEV_CFG,
	TCMU_ATTR_DEV_SIZE,
	TCMU_ATTR_WRITECACHE,
	TCMU_ATTR_CMD_STATUS,
	TCMU_ATTR_DEVICE_ID,
	TCMU_ATTR_SUPP_KERN_CMD_REPLY,
	__TCMU_ATTR_MAX,
};
#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
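
/*
 * Illustrative sketch, not part of the ABI: the enums above describe the
 * generic-netlink interface TCMU uses to announce device add/remove/reconfig
 * events.  A daemon can listen with libnl-genl-3; the kernel driver registers
 * the family as "TCM-USER" with a "config" multicast group.  Everything below,
 * including the TCMU_EXAMPLE_NETLINK guard that keeps the header usable
 * without libnl, is an assumption for illustration only.
 */
#ifdef TCMU_EXAMPLE_NETLINK
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int tcmu_example_nl_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *attrs[TCMU_ATTR_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, TCMU_ATTR_MAX, NULL) < 0)
		return NL_SKIP;

	if (gnlh->cmd == TCMU_CMD_ADDED_DEVICE && attrs[TCMU_ATTR_DEVICE] &&
	    attrs[TCMU_ATTR_MINOR])
		printf("new tcmu device %s (uio minor %u)\n",
		       nla_get_string(attrs[TCMU_ATTR_DEVICE]),
		       nla_get_u32(attrs[TCMU_ATTR_MINOR]));
	return NL_OK;
}

static int tcmu_example_listen(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk) < 0)
		return -1;
	nl_socket_disable_seq_check(sk);	/* multicast events carry no request seq */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, tcmu_example_nl_cb, NULL);

	grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
	if (grp < 0 || nl_socket_add_membership(sk, grp) < 0)
		return -1;

	while (nl_recvmsgs_default(sk) >= 0)
		;	/* each message is dispatched to tcmu_example_nl_cb */
	return -1;
}
#endif /* TCMU_EXAMPLE_NETLINK */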
#endif