NewMadeleine

Documentation

nm_core_inline.h
/*
 * NewMadeleine
 * Copyright (C) 2006-2024 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#ifndef NM_CORE_INLINE_H
#define NM_CORE_INLINE_H

#include <nm_private.h>
/* ** Driver management ************************************ */

/** Get the track per-gate data. */
static inline struct nm_trk_s*nm_gate_trk_get(nm_gate_t p_gate, nm_drv_t p_drv)
{
  assert(p_drv != NULL);
  int i;
  for(i = 0; i < p_gate->n_trks; i++)
    {
      if(p_gate->trks[i].p_drv == p_drv)
        return &p_gate->trks[i];
    }
  return NULL;
}

/** Get a track on the given gate by its index. */
static inline struct nm_trk_s*nm_trk_get_by_index(nm_gate_t p_gate, int index)
{
  assert(p_gate->n_trks > 0);
  assert(index < p_gate->n_trks);
  assert(index >= 0);
  return &p_gate->trks[index];
}
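
/* Usage sketch (hypothetical helper, for illustration only): pick the track
 * bound to a given driver, falling back to track 0 when the driver has no
 * per-gate track. Assumes the gate is connected and has at least one track. */
static inline struct nm_trk_s*nm_example_select_trk(nm_gate_t p_gate, nm_drv_t p_drv)
{
  struct nm_trk_s*p_trk = nm_gate_trk_get(p_gate, p_drv);
  if(p_trk == NULL)
    p_trk = nm_trk_get_by_index(p_gate, 0); /* default track */
  return p_trk;
}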

/** get maximum size for small messages for the given driver */
static inline nm_len_t nm_drv_max_small(const struct nm_drv_s*const p_drv)
{
  /* assumed reconstruction: the largest payload that fits in an unexpected
   * buffer after the header, rounded down to the alignment frontier
   * (NM_ALIGN_FRONTIER is assumed to be a power of two) */
  const nm_len_t max_small = (NM_MAX_UNEXPECTED - NM_HEADER_DATA_SIZE) & ~(NM_ALIGN_FRONTIER - 1);
  return (p_drv->props.capabilities.max_msg_size > max_small) ? max_small : p_drv->props.capabilities.max_msg_size;
}
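
/* Usage sketch (hypothetical helper): a chunk longer than nm_drv_max_small()
 * does not fit the small-message path and must go through rendezvous. */
static inline int nm_example_needs_rdv(const struct nm_drv_s*const p_drv, nm_len_t len)
{
  return (len > nm_drv_max_small(p_drv));
}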


/* ** Packet wrapper management **************************** */

/** assign packet to given driver, gate, and track */
static inline void nm_pw_assign(struct nm_pkt_wrap_s*p_pw, nm_trk_id_t trk_id, struct nm_drv_s*p_drv, nm_gate_t p_gate)
{
  p_pw->trk_id = trk_id;
  if(p_gate == NM_GATE_NONE)
    {
      assert(p_drv != NULL);
      assert(p_drv->p_pw_recv_any == NULL);
      p_drv->p_pw_recv_any = p_pw;
      p_pw->p_trk = NULL;
      p_pw->p_drv = p_drv;
    }
  else
    {
      assert(trk_id >= 0);
      assert(trk_id < p_gate->n_trks);
      p_pw->p_gate = p_gate;
      p_pw->p_trk = &p_gate->trks[trk_id];
      p_pw->p_drv = p_pw->p_trk->p_drv;
    }
}

static inline void nm_pw_ref_inc(struct nm_pkt_wrap_s*p_pw)
{
  nm_atomic_inc(&p_pw->ref_count);
}
static inline void nm_pw_ref_dec(struct nm_pkt_wrap_s*p_pw)
{
  const int count = nm_atomic_dec(&p_pw->ref_count);
  assert(count >= 0);
  if(count == 0)
    {
      assert(p_pw->p_drv != NULL);
      nm_pw_free(p_pw->p_drv->p_core, p_pw);
    }
}
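
/* Usage sketch (hypothetical): keep a pw alive across an asynchronous
 * operation by taking an extra reference; when the count drops to zero,
 * nm_pw_ref_dec() hands the pw back to nm_pw_free(). */
static inline void nm_example_pw_pin(struct nm_pkt_wrap_s*p_pw)
{
  nm_pw_ref_inc(p_pw); /* pin while the operation is in flight */
  /* ... asynchronous completion runs here ... */
  nm_pw_ref_dec(p_pw); /* releases; frees if this was the last reference */
}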

/* ** Gate ************************************************* */

static inline int nm_gate_isactive(struct nm_gate_s*p_gate)
{
  return( (!nm_req_chunk_list_empty(&p_gate->req_chunk_list)) ||
          (!nm_ctrl_chunk_list_empty(&p_gate->ctrl_chunk_list)) ||
          (p_gate->strat_todo) ||
          (!nm_pkt_wrap_list_empty(&p_gate->pending_large_recv)));
}

static inline int nm_gate_is_in_active_list(struct nm_core*p_core, struct nm_gate_s*p_gate)
{
  return ((p_gate == nm_active_gate_list_begin(&p_core->active_gates)) ||
          (!nm_active_gate_list_cell_isnull(p_gate)));
}

/** mark gate as having active requests */
static inline void nm_gate_set_active(struct nm_gate_s*p_gate)
{
  /* assumed guard (reconstructed): only a connected gate that is not already
   * enqueued may be pushed to the active list */
  if((p_gate->status == NM_GATE_STATUS_CONNECTED) &&
     (!nm_gate_is_in_active_list(p_gate->p_core, p_gate)))
    {
      assert(!nm_active_gate_list_contains(&p_gate->p_core->active_gates, p_gate));
      nm_active_gate_list_push_back(&p_gate->p_core->active_gates, p_gate);
    }
}

/* ** Core tasks ******************************************* */

static inline void nm_core_task_enqueue(struct nm_core*p_core, int holding_lock, struct nm_core_task_s*p_core_task)
{
  if(holding_lock)
    {
      nm_core_lock_assert(p_core);
    }
  else
    {
      nm_core_nolock_assert(p_core);
    }
  int rc;
  do
    {
#ifdef PIOMAN_MULTITHREAD
      rc = nm_core_task_lfqueue_enqueue(&p_core->pending_tasks, p_core_task);
#else
      rc = nm_core_task_lfqueue_enqueue_single_writer(&p_core->pending_tasks, p_core_task);
#endif /* PIOMAN_MULTITHREAD */
      if(rc)
        {
          /* queue full: flush pending tasks, then retry the enqueue */
          if(holding_lock)
            {
              nm_core_task_flush(p_core);
            }
          else
            {
              if(nm_core_trylock(p_core))
                {
                  nm_core_task_flush(p_core);
                  nm_core_unlock(p_core);
                }
            }
        }
    }
  while(rc);
}
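
/* Usage sketch (hypothetical): post a task from a section that does not hold
 * the core lock; a full queue is handled internally by flushing and retrying,
 * so the call always eventually succeeds. */
static inline void nm_example_task_post(struct nm_core*p_core, struct nm_core_task_s*p_task)
{
  nm_core_task_enqueue(p_core, 0 /* holding_lock: caller does not own the core lock */, p_task);
}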

/* ** Strategy ********************************************* */

static inline void nm_strat_submit_req_chunk(nm_core_t p_core, nm_gate_t p_gate, struct nm_req_chunk_s*p_req_chunk, int front)
{
  nm_core_lock_assert(p_core);
  if(p_core->strategy_iface->submit_req_chunk == NULL)
    {
      /* default implem for strat submit_req_chunk */
      if(front)
        {
          /* submit to front of list (for repacks) */
          nm_req_chunk_list_push_front(&p_gate->req_chunk_list, p_req_chunk);
        }
      else
        {
          /* regular: enqueue at back of list */
          nm_req_chunk_list_push_back(&p_gate->req_chunk_list, p_req_chunk);
        }
    }
  else
    {
      /* call strategy submit_req_chunk */
      (*p_core->strategy_iface->submit_req_chunk)(p_core->strategy_context, p_req_chunk, front);
    }
  nm_gate_set_active(p_gate); /* assumed reconstruction: newly submitted work marks the gate active */
}

/** process postponed rdv requests */
static inline void nm_strat_rdv_accept(nm_core_t p_core, nm_gate_t p_gate)
{
  nm_core_lock_assert(p_core);
  if(!nm_pkt_wrap_list_empty(&p_gate->pending_large_recv))
    {
      struct puk_receptacle_NewMad_Strategy_s*r = &p_gate->strategy_receptacle;
      if(r->driver->rdv_accept)
        {
          (*r->driver->rdv_accept)(r->_status, p_gate);
        }
    }
}

/** apply strategy on the given gate, for the case where strategy doesn't have global scheduling */
static inline void nm_strat_gate_schedule(nm_core_t p_core, nm_gate_t p_gate)
{
  nm_core_lock_assert(p_core);
  assert(p_gate != NULL);
  assert(p_core->strategy_iface->schedule == NULL);
  nm_strat_rdv_accept(p_core, p_gate); /* accept rdv early so that rtr may be scheduled by the try_and_commit below */
  struct puk_receptacle_NewMad_Strategy_s*r = &p_gate->strategy_receptacle;
  nm_profile_inc(p_gate->p_core->profiling.n_try_and_commit);
  /* schedule new outgoing requests on the given gate */
  (*r->driver->try_and_commit)(r->_status, p_gate);
}

/** apply strategy on the given gate (all active gates if p_gate = NULL) */
static inline void nm_strat_schedule(nm_core_t p_core, nm_gate_t p_gate)
{
#ifdef NMAD_DEBUG
  static int inprogress = 0;
  if(p_gate == NULL)
    {
      assert(!inprogress); /* make sure no global strat scheduling is done recursively */
      inprogress = 1;
    }
#endif /* NMAD_DEBUG */
  nm_core_lock_assert(p_core);
  if(p_core->strategy_iface != NULL && p_core->strategy_iface->schedule != NULL)
    {
      /* context-wide strategy */
      (*p_core->strategy_iface->schedule)(p_core->strategy_context);
    }
  else
    {
      if(p_gate == NULL)
        {
          nm_gate_t p_tmp_gate;
          puk_list_foreach_safe(nm_active_gate, p_gate, p_tmp_gate, &p_core->active_gates)
            {
              assert(nm_active_gate_list_contains(&p_core->active_gates, p_gate));
              /* assumed reconstruction of the per-gate scheduling step */
              if(p_gate->status == NM_GATE_STATUS_CONNECTED)
                {
                  nm_strat_gate_schedule(p_core, p_gate);
                  assert(nm_active_gate_list_contains(&p_core->active_gates, p_gate));
                  if(!nm_gate_isactive(p_gate))
                    {
                      nm_active_gate_list_remove(&p_core->active_gates, p_gate);
                      nm_active_gate_list_cell_setnull(p_gate); /* mark cell as not enqueued in any list */
                    }
                }
            }
        }
      else
        {
          nm_strat_gate_schedule(p_core, p_gate); /* assumed reconstruction: schedule the single given gate */
        }
    }
#ifdef NMAD_DEBUG
  inprogress = 0;
#endif /* NMAD_DEBUG */
}
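
/* Usage sketch (hypothetical; assumes nm_core_lock(), the counterpart of the
 * nm_core_unlock() used above): run one strategy pass over all active gates. */
static inline void nm_example_progress_step(nm_core_t p_core)
{
  nm_core_lock(p_core);            /* assumed lock function */
  nm_strat_schedule(p_core, NULL); /* NULL gate: walk every active gate */
  nm_core_unlock(p_core);
}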

static inline void nm_strat_pack_ctrl(nm_gate_t p_gate, nm_header_ctrl_generic_t*p_header)
{
  struct nm_core*p_core = p_gate->p_core;
  nm_core_lock_assert(p_core);
  struct nm_ctrl_chunk_s*p_ctrl_chunk = nm_ctrl_chunk_malloc(&p_core->ctrl_chunk_allocator);
  nm_ctrl_chunk_list_cell_init(p_ctrl_chunk);
  p_ctrl_chunk->ctrl = *p_header;
  p_ctrl_chunk->p_gate = p_gate;
  if(p_core->strategy_iface->submit_ctrl_chunk == NULL)
    {
      nm_ctrl_chunk_list_push_back(&p_gate->ctrl_chunk_list, p_ctrl_chunk);
      nm_gate_set_active(p_gate); /* assumed reconstruction: new ctrl chunk marks the gate active */
    }
  else
    {
      (*p_core->strategy_iface->submit_ctrl_chunk)(p_core->strategy_context, p_ctrl_chunk);
    }
}

/** enqueue a pw completion, or process immediately if possible */
static inline void nm_pw_completed_enqueue(struct nm_core*p_core, struct nm_pkt_wrap_s*p_pw)
{
  nm_core_nolock_assert(p_core);
  assert(!(p_pw->flags & NM_PW_COMPLETED));
  p_pw->flags |= NM_PW_COMPLETED;
#ifdef PIOMAN
  piom_ltask_completed(&p_pw->ltask);
#endif
  p_pw->core_task.kind = NM_CORE_TASK_COMPLETED_PW;
  p_pw->core_task.content.completed_pw.p_pw = p_pw;
  nm_core_task_enqueue(p_core, 0, &p_pw->core_task);
}


/* ** req chunks ******************************************* */

static inline void nm_req_chunk_submit(struct nm_core*p_core, struct nm_req_chunk_s*p_req_chunk)
{
  nm_core_nolock_assert(p_core);
  assert(p_req_chunk->p_req != NULL);
  p_req_chunk->core_task.kind = NM_CORE_TASK_PACK_SUBMISSION;
  p_req_chunk->core_task.content.pack_submission.p_req_chunk = p_req_chunk;
  nm_core_task_enqueue(p_core, 0, &p_req_chunk->core_task);
}

static inline void nm_req_chunk_destroy(struct nm_core*p_core, struct nm_req_chunk_s*p_req_chunk)
{
  struct nm_req_s*p_pack = p_req_chunk->p_req;
#ifdef NMAD_DEBUG
  p_req_chunk->chunk_len = NM_LEN_UNDEFINED;
  p_req_chunk->chunk_offset = NM_LEN_UNDEFINED;
#endif
  if(p_req_chunk != &p_pack->req_chunk)
    {
      /* dynamically allocated chunk: return it to the allocator */
      nm_req_chunk_free(&p_core->req_chunk_allocator, p_req_chunk);
    }
  else
    {
      /* preallocated chunk embedded in the request: just mark it as unused */
      p_pack->req_chunk.p_req = NULL;
    }
}

static inline struct nm_req_chunk_s*nm_req_chunk_alloc(struct nm_core*p_core)
{
  struct nm_req_chunk_s*p_req_chunk = nm_req_chunk_malloc(&p_core->req_chunk_allocator);
  nm_req_chunk_list_cell_init(p_req_chunk);
  return p_req_chunk;
}

static inline void nm_req_chunk_init(struct nm_req_chunk_s*p_req_chunk, struct nm_req_s*p_req,
                                     nm_len_t chunk_offset, nm_len_t chunk_len)
{
  nm_req_chunk_list_cell_init(p_req_chunk);
  p_req_chunk->p_req = p_req;
  p_req_chunk->chunk_offset = chunk_offset;
  p_req_chunk->chunk_len = chunk_len;
  p_req_chunk->proto_flags = 0;
  assert(chunk_offset + chunk_len <= p_req->pack.len);
  /* assumed guards, reconstructed from the flag semantics */
  if(chunk_offset + chunk_len == p_req->pack.len)
    {
      p_req_chunk->proto_flags |= NM_PROTO_FLAG_LASTCHUNK;
    }
  if(p_req->flags & NM_REQ_FLAG_PACK_SYNCHRONOUS)
    {
      p_req_chunk->proto_flags |= NM_PROTO_FLAG_ACKREQ;
    }
  nm_data_chunk_properties_compute(&p_req->data, chunk_offset, chunk_len, &p_req_chunk->chunk_props);
}
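
/* Usage sketch (hypothetical): submit the full data of a pack request as a
 * single chunk- allocate a chunk, describe the range, then hand it to the core. */
static inline void nm_example_submit_full_pack(struct nm_core*p_core, struct nm_req_s*p_req)
{
  struct nm_req_chunk_s*p_req_chunk = nm_req_chunk_alloc(p_core);
  nm_req_chunk_init(p_req_chunk, p_req, 0 /* offset */, p_req->pack.len);
  nm_req_chunk_submit(p_core, p_req_chunk); /* enqueued as a core task */
}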

/** Post a ready-to-receive to accept chunk on given trk_id. */
static inline void nm_core_post_rtr(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq,
                                    nm_trk_id_t trk_id, nm_len_t chunk_offset, nm_len_t chunk_len,
                                    const void*p_rdv_data)
{
  /* body reconstructed from the header-building helpers in nm_headers.h */
  nm_header_ctrl_generic_t h;
  if(chunk_len == 0)
    {
      NM_FATAL("trying to post RTR with chunk_offset = %lu; chunk_len = %lu\n", chunk_offset, chunk_len);
    }
  nm_header_init_rtr(&h, tag, seq, trk_id, chunk_offset, chunk_len, p_rdv_data);
  nm_strat_pack_ctrl(p_gate, &h);
}
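
/* Usage sketch (hypothetical; assumes the conventional NM_TRK_LARGE track id):
 * accept a rendezvous by granting the whole message as one chunk. */
static inline void nm_example_accept_rdv(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq,
                                         nm_len_t msg_len, const void*p_rdv_data)
{
  nm_core_post_rtr(p_gate, tag, seq, NM_TRK_LARGE, 0 /* offset */, msg_len, p_rdv_data);
}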

/** Post an ACK. */
static inline void nm_core_post_ack(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq)
{
  nm_header_ctrl_generic_t h; /* body reconstructed, mirroring nm_core_post_rtr() above */
  nm_header_init_ack(&h, tag, seq);
  nm_strat_pack_ctrl(p_gate, &h);
}

static inline void nm_core_post_msg(nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq, nm_len_t msg_len)
{
  nm_header_ctrl_generic_t h; /* body reconstructed, mirroring nm_core_post_rtr() above */
  nm_header_init_msg(&h, tag, seq, msg_len);
  nm_strat_pack_ctrl(p_gate, &h);
}

/** dynamically adapt pioman polling frequency level depending on the number of pending requests */
static inline void nm_core_polling_level(struct nm_core*p_core)
{
  nm_core_lock_assert(p_core);
  assert(p_core->n_packs >= 0);
  assert(p_core->n_unpacks >= 0);
#ifdef PIOMAN
  const int high =
    (p_core->n_packs > 0) ||
    (p_core->n_unpacks > 0) ||
    (!nm_core_task_lfqueue_empty(&p_core->pending_tasks));
  piom_ltask_poll_level_set(high);
#endif /* PIOMAN */
}

/* ** Tags & matching ************************************** */

static inline struct nm_matching_wildcard_s*nm_matching_wildcard_bytag(struct nm_core*p_core, nm_core_tag_t core_tag)
{
  const nm_core_tag_t session_tag = nm_core_tag_build(nm_core_tag_get_hashcode(core_tag), 0);
  struct nm_matching_wildcard_s*p_wildcard = nm_matching_wildcard_get(&p_core->wildcard_table, session_tag);
  return p_wildcard;
}

static inline struct nm_matching_gsession_s*nm_matching_gsession_bytag(struct nm_gate_s*p_gate, nm_core_tag_t core_tag)
{
  const nm_core_tag_t session_tag = nm_core_tag_build(nm_core_tag_get_hashcode(core_tag), 0);
  struct nm_matching_gsession_s*p_gsession = nm_matching_gsession_get(&p_gate->gsessions, session_tag);
  return p_gsession;
}
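
/* Usage sketch (hypothetical): both lookups above key on the session hashcode
 * only (the tag part is zeroed), so every tag within a session shares one
 * matching structure. */
static inline int nm_example_same_session(nm_core_tag_t tag_a, nm_core_tag_t tag_b)
{
  /* two tags map to the same wildcard/gsession entry iff their session hashcodes are equal */
  return (nm_core_tag_get_hashcode(tag_a) == nm_core_tag_get_hashcode(tag_b));
}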


#endif /* NM_CORE_INLINE_H */