NewMadeleine

Documentation

« back to PM2 home.
nm_core_inline.h
Go to the documentation of this file.
/*
 * NewMadeleine
 * Copyright (C) 2006-2026 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
15
16#ifndef NM_CORE_INLINE_H
17#define NM_CORE_INLINE_H
18
24/* ** Driver management ************************************ */
25
28{
29 assert(p_drv != NULL);
30 int i;
31 for(i = 0; i < p_gate->n_trks; i++)
32 {
33 if(p_gate->trks[i].p_drv == p_drv)
34 return &p_gate->trks[i];
35 }
36 return NULL;
37}
38
41static inline struct nm_trk_s*nm_trk_get_by_index(nm_gate_t p_gate, int index)
42{
43 assert(p_gate->n_trks > 0);
44 assert(index < p_gate->n_trks);
45 assert(index >= 0);
46 return &p_gate->trks[index];
47}
48
50static inline nm_len_t nm_drv_max_small(const struct nm_drv_s*const p_drv)
51{
53 return (p_drv->props.capabilities.max_msg_size > max_small) ? max_small : p_drv->props.capabilities.max_msg_size;
54}
55
57static inline int nm_trk_can_recv(const struct nm_trk_s*p_trk)
58{
59 const int max_pkt_recvs = p_trk->p_drv->props.capabilities.max_pkt_recvs;
60 const int cur = nm_pkt_wrap_list_size(&p_trk->active_pw_recv);
61 assert(max_pkt_recvs >= 1);
62 assert(cur >= 0);
63 return (cur < max_pkt_recvs);
64}
65
67static inline int nm_trk_can_send(const struct nm_trk_s*p_trk)
68{
69 const int max_pkt_sends = p_trk->p_drv->props.capabilities.max_pkt_sends;
70 const int cur = nm_pkt_wrap_list_size(&p_trk->active_pw_send);
71 assert(max_pkt_sends >= 1);
72 assert(cur >= 0);
73 return (cur < max_pkt_sends);
74}
75
76/* ** Packet wrapper management **************************** */
77
79static inline void nm_pw_assign(struct nm_pkt_wrap_s*p_pw, nm_trk_id_t trk_id, struct nm_drv_s*p_drv, nm_gate_t p_gate)
80{
81 p_pw->trk_id = trk_id;
82 if(p_gate == NM_GATE_NONE)
83 {
84 assert(p_drv->p_pw_recv_any == NULL);
85 assert(p_drv != NULL);
86 p_drv->p_pw_recv_any = p_pw;
87 p_pw->p_trk = NULL;
88 p_pw->p_drv = p_drv;
89 }
90 else
91 {
92 assert(trk_id >= 0);
93 assert(trk_id < p_gate->n_trks);
94 p_pw->p_gate = p_gate;
95 p_pw->p_trk = &p_gate->trks[trk_id];
96 p_pw->p_drv = p_pw->p_trk->p_drv;
97 }
98}
99
/** Increment refcount for pw; kept as a macro so refcount tracking can
 *  capture file/func/line at the call site. */
#define nm_pw_ref_inc(P_PW, LABEL) nm_refcount_inc(&(P_PW)->refcount, (LABEL))
102
104static inline void nm_pw_ref_dec(struct nm_pkt_wrap_s*p_pw, const char*label)
105{
106 nm_core_lock_assert(nm_core_get_singleton()); /* needed by nm_pw_free, check early for easier debug */
107 const int count = nm_refcount_dec(&p_pw->refcount, label);
108 assert(count >= 0);
109 if(count == 0)
110 {
111 struct nm_core*p_core = nm_core_get_singleton();
112 nm_pw_free(p_core, p_pw);
113 }
114}
116static inline void nm_pw_ref_dec_free(struct nm_pkt_wrap_s*p_pw, const char*label)
117{
118 assert(nm_refcount_get(&p_pw->refcount) == 1);
119 nm_pw_ref_dec(p_pw, label);
120}
121
122static inline void nm_header_global_finalize(struct nm_pkt_wrap_s*p_pw)
123{
124 struct nm_header_global_s*h = p_pw->v[0].iov_base;
125 const int v0len = p_pw->v[0].iov_len;
126 assert(p_pw->trk_id == NM_TRK_SMALL);
127 assert(v0len <= UINT16_MAX);
128 h->v0len = v0len;
129}
130
131static inline uint16_t nm_header_global_v0len(const struct nm_pkt_wrap_s*p_pw)
132{
134 const struct nm_header_global_s*h = p_pw->v[0].iov_base;
135 return h->v0len;
136}
137
138/* ** Gate ************************************************* */
139
140static inline int nm_gate_isactive(struct nm_gate_s*p_gate)
141{
142 return( (!nm_req_chunk_list_empty(&p_gate->req_chunk_list)) ||
143 (!nm_ctrl_chunk_list_empty(&p_gate->ctrl_chunk_list)) ||
144 (!nm_pkt_wrap_list_empty(&p_gate->pending_large_recv)));
145}
146
147static inline int nm_gate_is_in_active_list(struct nm_core*p_core, struct nm_gate_s*p_gate)
148{
149 return ((p_gate == nm_active_gate_list_begin(&p_core->active_gates)) ||
150 (!nm_active_gate_list_cell_isnull(p_gate)));
151}
152
154static inline void nm_gate_set_active(struct nm_gate_s*p_gate)
155{
159 {
160 assert(!nm_active_gate_list_contains(&p_gate->p_core->active_gates, p_gate));
161 nm_active_gate_list_push_back(&p_gate->p_core->active_gates, p_gate);
163 }
164}
165
166/* ** Core tasks ******************************************* */
167
168static inline void nm_core_task_enqueue(struct nm_core*p_core, int holding_lock, struct nm_core_task_s*p_core_task)
169{
170 assert(p_core_task->kind != NM_CORE_TASK_NONE);
171 if(holding_lock)
172 {
173 nm_core_lock_assert(p_core);
174 }
175 else
176 {
177 nm_core_nolock_assert(p_core);
178 }
179 int rc;
180 do
181 {
182#ifdef PIOMAN_MULTITHREAD
183 rc = nm_core_task_lfqueue_enqueue(&p_core->pending_tasks, p_core_task);
184#else
185 rc = nm_core_task_lfqueue_enqueue_single_writer(&p_core->pending_tasks, p_core_task);
186#endif /* PIOMAN_MULTITHREAD */
187 if(rc)
188 {
189 if(holding_lock)
190 {
191 nm_core_task_flush(p_core);
192 }
193 else
194 {
195 if(nm_core_trylock(p_core))
196 {
197 nm_core_task_flush(p_core);
198 nm_core_unlock(p_core);
199 }
200 }
201 }
202 }
203 while(rc);
204}
205
207static inline void nm_pw_completed_enqueue(struct nm_core*p_core, struct nm_pkt_wrap_s*p_pw)
208{
209 nm_core_nolock_assert(p_core);
210 assert(!(p_pw->flags & NM_PW_COMPLETED));
211 p_pw->flags |= NM_PW_COMPLETED;
212#ifdef PIOMAN
213 piom_ltask_completed(&p_pw->ltask);
214#else /* PIOMAN */
215 nm_pw_poll_list_remove(&p_core->pw_poll_list, p_pw);
216#endif /* PIOMAN */
217 nm_pw_ref_inc(p_pw, nm_pw_refcount_completion); /* keep pw alive up to core_task completion */
220 p_pw->core_task.content.completed_pw.p_pw = p_pw;
221 nm_core_task_enqueue(p_core, 0, &p_pw->core_task);
222}
223
224
225/* ** req chunks ******************************************* */
226
227static inline void nm_req_chunk_submit(struct nm_core*p_core, struct nm_req_chunk_s*p_req_chunk)
228{
229 nm_core_nolock_assert(p_core);
230 assert(p_req_chunk->p_req != NULL);
231 assert(p_req_chunk->core_task.kind == NM_CORE_TASK_NONE);
233 p_req_chunk->core_task.content.pack_submission.p_req_chunk = p_req_chunk;
234 nm_core_task_enqueue(p_core, 0, &p_req_chunk->core_task);
235}
236
237static inline void nm_req_chunk_destroy(struct nm_core*p_core, struct nm_req_chunk_s*p_req_chunk)
238{
239 struct nm_req_s*p_pack = p_req_chunk->p_req;
240 nm_req_chunk_list_check_null(p_req_chunk);
241#ifdef NMAD_DEBUG
242 p_req_chunk->chunk_len = NM_LEN_UNDEFINED;
243 p_req_chunk->chunk_offset = NM_LEN_UNDEFINED;
244#endif
245 if(p_req_chunk != &p_pack->req_chunk)
246 {
247 nm_req_chunk_free(&p_core->req_chunk_allocator, p_req_chunk);
248 }
249 else
250 {
251 p_pack->req_chunk.p_req = NULL;
252 }
253}
254
255static inline struct nm_req_chunk_s*nm_req_chunk_alloc(struct nm_core*p_core)
256{
257 struct nm_req_chunk_s*p_req_chunk = nm_req_chunk_malloc(&p_core->req_chunk_allocator);
258 nm_req_chunk_list_cell_init(p_req_chunk);
259 return p_req_chunk;
260}
261
262static inline void nm_req_chunk_init(struct nm_req_chunk_s*p_req_chunk, struct nm_req_s*p_req,
264{
265 nm_req_chunk_list_cell_init(p_req_chunk);
266 p_req_chunk->core_task.kind = NM_CORE_TASK_NONE;
267 p_req_chunk->p_req = p_req;
268 p_req_chunk->chunk_offset = chunk_offset;
269 p_req_chunk->chunk_len = chunk_len;
270 p_req_chunk->proto_flags = 0;
271 assert(chunk_offset + chunk_len <= p_req->pack.len);
273 {
274 p_req_chunk->proto_flags |= NM_PROTO_FLAG_LASTCHUNK;
275 }
277 {
278 p_req_chunk->proto_flags |= NM_PROTO_FLAG_ACKREQ;
279 }
281}
282
284static inline void nm_req_chunk_update(struct nm_req_chunk_s*p_req_chunk)
285{
286 assert(p_req_chunk->chunk_offset + p_req_chunk->chunk_len <= p_req_chunk->p_req->pack.len);
287 if(p_req_chunk->chunk_offset + p_req_chunk->chunk_len == p_req_chunk->p_req->pack.len)
288 {
289 p_req_chunk->proto_flags |= NM_PROTO_FLAG_LASTCHUNK;
290 }
291 else
292 {
293 p_req_chunk->proto_flags &= ~NM_PROTO_FLAG_LASTCHUNK;
294 }
295 nm_data_chunk_properties_compute(&p_req_chunk->p_req->data, p_req_chunk->chunk_offset, p_req_chunk->chunk_len, &p_req_chunk->chunk_props);
296}
297
300static inline void nm_core_post_req_chunk(nm_core_t p_core, nm_gate_t p_gate, struct nm_req_chunk_s*p_req_chunk)
301{
302 nm_core_lock_assert(p_core);
303 nm_req_chunk_list_push_back(&p_gate->req_chunk_list, p_req_chunk);
305}
306
310 const void*p_payload, nm_len_t payload_size)
311{
312 struct nm_core*p_core = p_gate->p_core;
313 nm_core_lock_assert(p_core);
314 struct nm_ctrl_chunk_s*p_ctrl_chunk = nm_ctrl_chunk_malloc(&p_core->ctrl_chunk_allocator);
315 nm_ctrl_chunk_list_cell_init(p_ctrl_chunk);
316 p_ctrl_chunk->ctrl = *p_header;
317 p_ctrl_chunk->p_gate = p_gate;
318 p_ctrl_chunk->p_payload = p_payload;
319 p_ctrl_chunk->size = payload_size;
320 nm_ctrl_chunk_list_push_back(&p_gate->ctrl_chunk_list, p_ctrl_chunk);
322}
323
328 const void*p_rdv_data, nm_len_t size)
329{
331 if(chunk_len == 0)
332 {
333 NM_FATAL("trying to post RTR with chunk_offset = %llu; chunk_len = %llu\n", (unsigned long long)chunk_offset, (unsigned long long)chunk_len);
334 }
336 nm_core_post_ctrl(p_gate, &h, p_rdv_data, size);
337}
338
347
354
356static inline void nm_core_polling_level(struct nm_core*p_core)
357{
358 nm_core_lock_assert(p_core);
359 assert(p_core->n_packs >= 0);
360 assert(p_core->n_unpacks >= 0);
361#ifdef PIOMAN
362 const int high =
363 (p_core->n_packs > 0) ||
364 (p_core->n_unpacks > 0) ||
365 (!nm_core_task_lfqueue_empty(&p_core->pending_tasks));
366 piom_ltask_poll_level_set(high);
367#endif /* PIOMAN */
368}
369
370/* ** Tags & matching ************************************** */
371
372static inline struct nm_matching_wildcard_s*nm_matching_wildcard_bytag(struct nm_core*p_core, nm_core_tag_t core_tag)
373{
374 const nm_core_tag_t session_tag = nm_core_tag_build(nm_core_tag_get_hashcode(core_tag), 0);
375 struct nm_matching_wildcard_s*p_wildcard = nm_matching_wildcard_get(&p_core->wildcard_table, session_tag);
376 return p_wildcard;
377}
378
380{
381 const nm_core_tag_t session_tag = nm_core_tag_build(nm_core_tag_get_hashcode(core_tag), 0);
382 struct nm_matching_gsession_s*p_gsession = nm_matching_gsession_get(&p_gate->gsessions, session_tag);
383 return p_gsession;
384}
385
386
387#endif /* NM_CORE_INLINE_H */
static nm_session_hash_t nm_core_tag_get_hashcode(nm_core_tag_t core_tag)
static nm_core_tag_t nm_core_tag_build(nm_session_hash_t hashcode, nm_tag_t tag)
@ NM_CORE_TASK_PACK_SUBMISSION
process a submitted pack request
@ NM_CORE_TASK_NONE
@ NM_CORE_TASK_COMPLETED_PW
process a completed pw
void nm_data_chunk_properties_compute(const struct nm_data_s *p_data, nm_len_t chunk_offset, nm_len_t chunk_len, struct nm_data_properties_s *p_props)
compute properties of the given chunk inside the data
#define NM_REQ_FLAG_PACK_SYNCHRONOUS
flag pack as synchronous (i.e.
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. mpif.h
Definition mpif.h:19
static void nm_gate_set_active(struct nm_gate_s *p_gate)
mark gate as having active requests
static struct nm_matching_gsession_s * nm_matching_gsession_bytag(struct nm_gate_s *p_gate, nm_core_tag_t core_tag)
static nm_len_t nm_drv_max_small(const struct nm_drv_s *const p_drv)
get maximum size for small messages for the given driver
static void nm_core_post_rtr(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq, nm_trk_id_t trk_id, nm_len_t chunk_offset, nm_len_t chunk_len, const void *p_rdv_data, nm_len_t size)
Post a ready-to-receive to accept chunk on given trk_id.
static void nm_pw_ref_dec_free(struct nm_pkt_wrap_s *p_pw, const char *label)
decrement ref_count for pw, supposed to reach 0
static void nm_pw_assign(struct nm_pkt_wrap_s *p_pw, nm_trk_id_t trk_id, struct nm_drv_s *p_drv, nm_gate_t p_gate)
assign packet to given driver, gate, and track
static void nm_core_post_ack(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq)
Post an ACK.
static int nm_gate_is_in_active_list(struct nm_core *p_core, struct nm_gate_s *p_gate)
static void nm_req_chunk_init(struct nm_req_chunk_s *p_req_chunk, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
static void nm_core_post_ctrl(nm_gate_t p_gate, nm_header_ctrl_generic_t *p_header, const void *p_payload, nm_len_t payload_size)
Post a generic control header to the given gate.
static int nm_trk_can_send(const struct nm_trk_s *p_trk)
checks whether we can post one more send pw on this trk
static void nm_core_polling_level(struct nm_core *p_core)
dynamically adapt pioman polling frequency level depending on the number of pending requests
static void nm_pw_completed_enqueue(struct nm_core *p_core, struct nm_pkt_wrap_s *p_pw)
enqueue a pw completion
static void nm_req_chunk_update(struct nm_req_chunk_s *p_req_chunk)
update req chunk properties after it has been modified (split, trim, ...)
static void nm_req_chunk_submit(struct nm_core *p_core, struct nm_req_chunk_s *p_req_chunk)
static void nm_req_chunk_destroy(struct nm_core *p_core, struct nm_req_chunk_s *p_req_chunk)
static void nm_pw_ref_dec(struct nm_pkt_wrap_s *p_pw, const char *label)
decrement ref_count for pw; free if ref_count reaches 0
static struct nm_trk_s * nm_trk_get_by_index(nm_gate_t p_gate, int index)
Get a driver given its id.
static struct nm_req_chunk_s * nm_req_chunk_alloc(struct nm_core *p_core)
static void nm_core_task_enqueue(struct nm_core *p_core, int holding_lock, struct nm_core_task_s *p_core_task)
static void nm_core_post_req_chunk(nm_core_t p_core, nm_gate_t p_gate, struct nm_req_chunk_s *p_req_chunk)
Post a req chunk for sending; it will later be scheduled by the strategy.
#define nm_pw_ref_inc(P_PW, LABEL)
increment refcount for pw; make it a macro to be able to capture file/func/line in refcount tracking
static int nm_gate_isactive(struct nm_gate_s *p_gate)
static struct nm_matching_wildcard_s * nm_matching_wildcard_bytag(struct nm_core *p_core, nm_core_tag_t core_tag)
static void nm_core_post_msg(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq, nm_len_t msg_len)
static int nm_trk_can_recv(const struct nm_trk_s *p_trk)
checks whether we can post one more recv pw on this trk
static void nm_header_global_finalize(struct nm_pkt_wrap_s *p_pw)
static uint16_t nm_header_global_v0len(const struct nm_pkt_wrap_s *p_pw)
static struct nm_trk_s * nm_gate_trk_get(nm_gate_t p_gate, nm_drv_t p_drv)
Get the track per-gate data.
nm_tag_t tag
the user-supplied tag
static int nm_refcount_dec(struct nm_refcount_s *p_refcount, const void *p_holder __attribute__((unused)))
decrement refcount for holder; returns refcount (if 0, caller may free ref-counted resource)
static int nm_refcount_get(struct nm_refcount_s *p_refcount)
__PUK_SYM_INTERNAL void nm_core_task_flush(struct nm_core *p_core)
#define NM_ALIGN_FRONTIER
#define NM_TRK_SMALL
assert(p_data->ops.p_traversal !=NULL)
static nm_gate_t p_gate
nm_len_t payload_size
Definition nm_headers.h:6
static void nm_header_init_msg(union nm_header_ctrl_generic_s *p_ctrl, nm_core_tag_t tag_id, nm_seq_t seq, nm_len_t msg_len)
Definition nm_headers.h:276
static void nm_header_init_ack(union nm_header_ctrl_generic_s *p_ctrl, nm_core_tag_t tag, nm_seq_t seq)
Definition nm_headers.h:312
#define NM_PROTO_FLAG_ACKREQ
data sent as synchronous send- please send an ack on first chunk
Definition nm_headers.h:54
nm_len_t chunk_len
length of this chunk
Definition nm_headers.h:4
nm_trk_id_t trk_id
index of the track relative to the gate
Definition nm_headers.h:3
static void nm_header_init_rtr(union nm_header_ctrl_generic_s *p_ctrl, nm_core_tag_t tag, nm_seq_t seq, nm_trk_id_t trk_id, nm_len_t chunk_offset, nm_len_t chunk_len, nm_len_t payload_size)
Definition nm_headers.h:300
nm_len_t chunk_offset
offset of the enclosed chunk
Definition nm_headers.h:4
nm_len_t msg_len
length of the full message
Definition nm_headers.h:3
#define NM_PROTO_FLAG_LASTCHUNK
last chunk of data for the given pack
Definition nm_headers.h:50
#define NM_HEADER_DATA_SIZE
Definition nm_headers.h:212
uint16_t v0len
size of v0 actually used ( == offset value to reach v[1] )
Definition nm_headers.h:0
nm_seq_t seq
sequence number
Definition nm_headers.h:2
static void nm_core_unlock(struct nm_core *p_core)
unlock the nm core lock
static void nm_core_nolock_assert(struct nm_core *p_core)
assert that current thread doesn't hold the lock
static int nm_core_trylock(struct nm_core *p_core)
try to lock the nm core core return 1 is lock is successfully acquired, 0 otherwise
static void nm_core_lock_assert(struct nm_core *p_core)
assert that current thread holds the lock
#define NM_FATAL(format,...)
Definition nm_log.h:36
nm_mpi_count_t count
number of elements to be exchanged
nm_len_t size
size of the one-sided data (not including target-side completion)
#define NM_MAX_UNEXPECTED
Maximum size of unexpected packets.
#define NM_PW_COMPLETED
send/recv of pw is completed
Definition nm_pkt_wrap.h:70
#define NM_PW_GLOBAL_HEADER
pw allocated with a contiguous buffer and a global header has been prepared.
Definition nm_pkt_wrap.h:49
#define NM_PW_BUF_SEND
use buffer-based driver for send
Definition nm_pkt_wrap.h:82
#define NM_PW_BUFFER
pw allocated with NM_MAX_UNEXPECTED contiguous buffer.
Definition nm_pkt_wrap.h:44
const char nm_pw_refcount_completion[]
ref from completion core_task
void nm_pw_free(struct nm_core *p_core, struct nm_pkt_wrap_s *p_pw)
static struct nm_matching_wildcard_s * nm_matching_wildcard_get(struct nm_matching_wildcard_table_s *t, nm_core_tag_t tag)
Definition nm_tags.h:140
int8_t nm_trk_id_t
ID of a track, assigned in order.
Definition nm_types.h:86
#define NM_LEN_UNDEFINED
length is undefined
Definition nm_types.h:71
#define NM_GATE_NONE
no gate
Definition nm_types.h:39
uint64_t nm_len_t
data length used by nmad
Definition nm_types.h:68
uint32_t nm_seq_t
Sequence number for packets on a given gate/tag.
Definition nm_types.h:102
An internal tag.
asynchronous tasks for nmad core.
struct nm_core_task_s::@7::@9 completed_pw
struct nm_core_task_s::@7::@12 pack_submission
enum nm_core_task_kind_e kind
union nm_core_task_s::@7 content
struct nm_pkt_wrap_s * p_pw
struct nm_req_chunk_s * p_req_chunk
Core NewMadeleine structure.
Definition nm_core.h:43
struct nm_core_task_lfqueue_s pending_tasks
core tasks to execute asynchronously; enqueued from non-locked sections; read from locked sections
Definition nm_core.h:67
int n_unpacks
number of pending packs & unpacks
Definition nm_core.h:74
struct nm_ctrl_chunk_allocator_s ctrl_chunk_allocator
allocator for control chunks
Definition nm_core.h:70
struct nm_req_chunk_allocator_s req_chunk_allocator
allocator for req_chunk elements
Definition nm_core.h:69
struct nm_matching_wildcard_table_s wildcard_table
matching info for wildcard requests
Definition nm_core.h:76
struct nm_active_gate_list_s active_gates
list of gates with active requests
Definition nm_core.h:78
int n_packs
Definition nm_core.h:74
struct nm_pw_poll_list_s pw_poll_list
active pw for send/recv to poll
Definition nm_core.h:51
a chunk of control data
Definition nm_headers.h:194
const void * p_payload
payload for the ctrl chunk (e.g.
Definition nm_headers.h:198
nm_header_ctrl_generic_t ctrl
Definition nm_headers.h:197
nm_len_t size
size of the payload (0 if none)
Definition nm_headers.h:199
nm_gate_t p_gate
Definition nm_headers.h:196
a driver.
Definition nm_drv.h:35
struct nm_minidriver_properties_s props
driver properties (profile & capabilities)
Definition nm_drv.h:52
struct nm_pkt_wrap_s * p_pw_recv_any
global recv request if driver supports recv_probe_any
Definition nm_drv.h:49
Connection to another process.
Definition nm_gate.h:104
struct nm_core * p_core
NM core object.
Definition nm_gate.h:140
struct nm_ctrl_chunk_list_s ctrl_chunk_list
control chunks posted to the gate
Definition nm_gate.h:137
struct nm_matching_gsession_table_s gsessions
table of gate/session
Definition nm_gate.h:122
struct nm_trk_s * trks
Tracks opened for each driver.
Definition nm_gate.h:116
int n_trks
Number of tracks opened on this gate.
Definition nm_gate.h:113
struct nm_req_chunk_list_s req_chunk_list
send reqs posted to the gate
Definition nm_gate.h:134
struct nm_pkt_wrap_list_s pending_large_recv
large messages waiting for Track 1 (or 2) to be free- list of pw
Definition nm_gate.h:125
global header at the beginning of pw
Definition nm_headers.h:21
struct to store matching info for wildcard requests, one per session
Definition nm_tags.h:126
int max_pkt_recvs
maximum number of concurrent receives; if 0, 1 is assumed
nm_len_t max_msg_size
maximum message size for the track
int max_pkt_sends
maximum number of concurrent sends; if 0, 1 is assumed
struct nm_minidriver_capabilities_s capabilities
Internal packet wrapper.
struct iovec * v
IO vector.
struct nm_refcount_s refcount
number of references pointing to the header
struct nm_core_task_s core_task
link to insert the pw as a core task
nm_trk_id_t trk_id
assignated track ID.
nm_pw_flag_t flags
packet flags.
nm_drv_t p_drv
assignated driver.
struct nm_trk_s * p_trk
assignated track, if relevant.
nm_gate_t p_gate
assignated gate, if relevant.
a chunk of request
nm_len_t chunk_offset
offset of the chunk relative to the full data in the req
struct nm_core_task_s core_task
nm_proto_t proto_flags
pre-computed proto flags
struct nm_data_properties_s chunk_props
properties of the data chunk
nm_len_t chunk_len
length of the chunk
struct nm_req_s * p_req
link to insert the req chunk as a core task
a generic pack/unpack request
struct nm_req_s::@15::@17 pack
struct nm_req_chunk_s req_chunk
preallocated chunk for the common case (single-chunk)
nm_req_flag_t flags
flags given by user
struct nm_data_s data
data descriptor to send/recv
nm_len_t len
cumulated data length
a track on a given gate
Definition nm_gate.h:69
struct nm_pkt_wrap_list_s active_pw_recv
active pws for recv on the given trk
Definition nm_gate.h:75
struct nm_pkt_wrap_list_s active_pw_send
active pws for send on the given trk
Definition nm_gate.h:76
struct nm_drv_s * p_drv
driver attached to the track
Definition nm_gate.h:72
a unified control header type
Definition nm_headers.h:145