NewMadeleine

Documentation

« back to PM2 home.
nm_core_types.h
Go to the documentation of this file.
1/*
2 * NewMadeleine
3 * Copyright (C) 2006-2026 (see AUTHORS file)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
#ifndef NM_CORE_TYPES_H
#define NM_CORE_TYPES_H


/* ** alignment ******************************************** */

/** default alignment boundary used by nmad, derived from NM_ALIGN_TYPE
 * (NM_ALIGN_TYPE is defined elsewhere -- presumably in the build config) */
#define NM_ALIGN_FRONTIER sizeof(NM_ALIGN_TYPE)
/** round length x up to the default alignment frontier (delegates to nm_aligned_n) */
#define nm_aligned(x) nm_aligned_n((x), NM_ALIGN_FRONTIER)
30
32{
33 return (v + a - 1) & ~(a - 1);
34}
35
38static inline void*nm_aligned_ptr(const void*p, nm_len_t a)
39{
40 return (void*)((uintptr_t)p & ~(a - 1));
41}
42
/** byte offset of MEMBER inside TYPE (classic offsetof idiom through a null pointer) */
#define nm_offset_of(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
/** recover a pointer to the enclosing 'type' from a pointer 'ptr' to its 'member';
 * the intermediate cast to the member's own pointer type makes the compiler
 * type-check that 'ptr' really is a pointer to a 'member' */
#define nm_container_of(ptr, type, member) \
  ((type *)((char *)(__typeof__ (&((type *)0)->member))(ptr)- \
	    nm_offset_of(type,member)))
47
/* ** Threads ********************************************* */

#ifdef PIOMAN_MULTITHREAD
/* pioman multithreading compiled in: nmad always runs threaded */
#define NM_IS_THREADED 1
#else
/* no pioman: threaded iff the thread level requested from the core is
 * at least NM_THREAD_SERIALIZED */
#define NM_IS_THREADED ((nm_core_get_thread_level(nm_core_get_singleton()) >= NM_THREAD_SERIALIZED) ? 1 : 0)
#endif

/* ** Allocators ****************************************** */

/** allocator type used throughout nmad; the extra flag passed to puk is
 * !NM_IS_THREADED -- presumably selects a cheaper single-threaded variant
 * when nmad is not threaded; confirm against PUK_ALLOCATOR_TYPE_EXT */
#define NM_ALLOCATOR_TYPE(ENAME, TYPE) PUK_ALLOCATOR_TYPE_EXT(ENAME, TYPE, !NM_IS_THREADED)
59
/* ** Requests ********************************************* */



/** allocator for request chunks (struct nm_req_chunk_s is declared elsewhere) */
NM_ALLOCATOR_TYPE(nm_req_chunk, struct nm_req_chunk_s);


/* ** Packet wrappers ************************************** */

/** LF queue type for completed pw, fixed capacity 512, NULL as empty slot */
PUK_LFQUEUE_TYPE(nm_pkt_wrap, struct nm_pkt_wrap_s*, NULL, 512);


/* ** Tracks *********************************************** */

/** track #0 -- small messages (per its name; confirm semantics in driver code) */
#define NM_TRK_SMALL ((nm_trk_id_t)0)
/** track #1 -- large messages */
#define NM_TRK_LARGE ((nm_trk_id_t)1)
/** invalid/unset track id */
#define NM_TRK_NONE ((nm_trk_id_t)-1)

/** number of tracks per driver (SMALL + LARGE) */
#define NM_MAX_TRACKS 2
84
/* ** Sequence numbers ************************************* */

/** first valid sequence number; NM_SEQ_NONE is reserved and never used by
 * real packets (see nm_types.h) */
#define NM_SEQ_FIRST ((nm_seq_t)1)
89
92{
94 seq++;
95 if(seq == NM_SEQ_NONE)
96 seq++;
97 return seq;
98}
99
102static inline int nm_seq_compare(nm_seq_t current, nm_seq_t seq1, nm_seq_t seq2)
103{
104 assert(seq1 != current);
105 assert(seq2 != current);
106 const nm_seq_t seq1_abs = (seq1 > current) ? (seq1 - current) : (seq1 - current - 1);
107 const nm_seq_t seq2_abs = (seq2 > current) ? (seq2 - current) : (seq2 - current - 1);
108 if(seq1_abs < seq2_abs)
109 return -1;
110 else if(seq1_abs > seq2_abs)
111 return 1;
112 else
113 return 0;
114}
115
/** Compare two sequence numbers taking wrap-around at overflow into account.
 * Callers must guarantee seq1 and seq2 are less than half a trip apart,
 * otherwise the order is ambiguous and we abort.
 * @return -1 if seq1 is before seq2, +1 if after, 0 if equal.
 */
static inline int nm_seq_fuzzy_compare(nm_seq_t seq1, nm_seq_t seq2)
{
  /* distances below 1/4 of the seq space mean "no wrap occurred";
   * distances above 3/4 are interpreted as a wrap around NM_SEQ_MAX;
   * anything in between ([1/4 .. 3/4]) cannot be disambiguated */
  static const nm_seq_t seq_compare_threshold = NM_SEQ_MAX / 4;
  if(seq1 == seq2)
    return 0;
  else if((seq1 < seq2) && (seq2 - seq1 < seq_compare_threshold))
    return -1; /* plain case: seq1 before seq2, no wrap */
  else if((seq1 > seq2) && (seq1 - seq2 > 3 * seq_compare_threshold))
    return -1; /* seq2 wrapped past NM_SEQ_MAX: seq1 still logically before */
  else if((seq1 > seq2) && (seq1 - seq2 < seq_compare_threshold))
    return 1; /* plain case: seq1 after seq2, no wrap */
  else if((seq1 < seq2) && (seq2 - seq1 > 3 * seq_compare_threshold))
    return 1; /* seq1 wrapped past NM_SEQ_MAX: seq1 logically after */
  else
    {
      /* ambiguous distance: caller violated the half-trip precondition */
      NM_FATAL("cannot compare seq1 = %u; seq2=%u\n", seq1, seq2);
    }
}
137
/* ** Profiling ******************************************** */

#ifdef NMAD_PROFILE
/** atomically add VALUE to the profiling COUNTER */
#define nm_profile_add(COUNTER, VALUE) do { __sync_fetch_and_add(&COUNTER, VALUE); } while(0)
#else
/** profiling disabled: expands to a no-op */
#define nm_profile_add(COUNTER, VALUE) do {} while(0)
#endif

/** increment the profiling COUNTER by one */
#define nm_profile_inc(COUNTER) nm_profile_add(COUNTER, 1)

/* ** init helper ****************************************** */

/** static initializer for struct nm_lazy_initializer_s: not initialized yet */
#define NM_LAZY_INITIALIZER_INIT { .init_done = 0, .initializing = 0 }
157
/** Generate lazy init/fini hooks for module ENAME:
 *  - ENAME_lazy_init() calls CTOR exactly once; the first thread to bump
 *    'initializing' from 0 runs the constructor, concurrent callers
 *    spin with sched_yield() until 'init_done' is set;
 *  - ENAME_lazy_destructor() runs at program exit (gcc destructor
 *    attribute) and calls DTOR iff init actually happened, then marks
 *    the state -1 so any later use aborts with a fatal error.
 *  Fix: error message spelled "detructor" -> "destructor".
 */
#define NM_LAZY_INITIALIZER(ENAME, CTOR, DTOR)			\
  static struct nm_lazy_initializer_s ENAME ## _lazy_initializer = NM_LAZY_INITIALIZER_INIT; \
  \
  static void ENAME ## _lazy_init(void)			\
  {								\
    if(ENAME##_lazy_initializer.init_done == 0)			\
      {								\
	/* first thread to increment 'initializing' runs the ctor */ \
	if(nm_atomic_inc(&ENAME##_lazy_initializer.initializing) == 0) \
	  {							\
	    (*CTOR)();						\
	    nm_atomic_inc(&ENAME##_lazy_initializer.init_done);	\
	  }							\
	else							\
	  {							\
	    /* another thread is initializing: wait for completion */ \
	    while(!ENAME##_lazy_initializer.init_done)		\
	      {							\
		sched_yield();					\
	      }							\
	  }							\
      }								\
    else if(ENAME##_lazy_initializer.init_done == -1)		\
      {								\
	NM_FATAL("trying to use %s after destructor.\n", #ENAME); \
      }								\
  }								\
  static void ENAME ## _lazy_destructor(void) __attribute__((destructor)); \
  static void ENAME ## _lazy_destructor(void)			\
  {								\
    if(ENAME##_lazy_initializer.init_done > 0)			\
      {								\
	(*DTOR)();						\
      }								\
    ENAME##_lazy_initializer.init_done = -1;			\
  }
197
198
/* ** Drivers ********************************************** */


/* ** Events *********************************************** */

/** vector of global core monitors */
PUK_VECT_TYPE(nm_core_monitor, struct nm_core_monitor_s*);

/** a pending event, not dispatched immediately because it was received
 * out of order */
PUK_LIST_TYPE(nm_core_pending_event,
	      struct nm_core_event_s event;            /**< the event waiting for dispatch */
	      struct nm_core_monitor_s*p_core_monitor; /**< monitor the event belongs to */
	      );

/** LF queue of events ready for dispatch (matching already done) */
PUK_LFQUEUE_TYPE(nm_core_dispatching_event, struct nm_core_dispatching_event_s*, NULL, 1024);
NM_ALLOCATOR_TYPE(nm_core_dispatching_event, struct nm_core_dispatching_event_s);

/* ** Unexpected chunks ************************************ */

/* list types for unexpected chunks, indexed four ways
 * (wildcard / gate+tag / gate / tag); access funcs generated below */
PUK_LIST_DECLARE_TYPE2(nm_unexpected_wildcard, struct nm_unexpected_s);
PUK_LIST_DECLARE_TYPE2(nm_unexpected_gtag, struct nm_unexpected_s);
PUK_LIST_DECLARE_TYPE2(nm_unexpected_gate, struct nm_unexpected_s);
PUK_LIST_DECLARE_TYPE2(nm_unexpected_tag, struct nm_unexpected_s);




PUK_LIST_CREATE_FUNCS(nm_unexpected_wildcard);
PUK_LIST_CREATE_FUNCS(nm_unexpected_gtag);
PUK_LIST_CREATE_FUNCS(nm_unexpected_gate);
PUK_LIST_CREATE_FUNCS(nm_unexpected_tag);


#endif /* NM_CORE_TYPES_H */
void(* nm_injector_pull_data_t)(struct nm_req_s *p_req, const struct nm_data_s *p_data, nm_len_t chunk_offset, nm_len_t chunk_len, void *p_ref)
user-supplied function called to pull data into a posted request through the nmad core; p_req is the user request.
PUK_LFQUEUE_TYPE(nm_pkt_wrap, struct nm_pkt_wrap_s *, NULL, 512)
LF queue type for completed pw.
PUK_LIST_DECLARE_TYPE2(nm_unexpected_wildcard, struct nm_unexpected_s)
static nm_len_t nm_aligned_n(nm_len_t v, nm_len_t a)
PUK_VECT_TYPE(nm_core_monitor, struct nm_core_monitor_s *)
static nm_seq_t nm_seq_next(nm_seq_t seq)
Compute next sequence number.
PUK_LIST_CREATE_FUNCS(nm_req)
static int nm_seq_compare(nm_seq_t current, nm_seq_t seq1, nm_seq_t seq2)
compare two seq numbers, assuming both are in the future; returns -1 if seq1 is before seq2, +1 if after, 0 if equal.
#define NM_ALLOCATOR_TYPE(ENAME, TYPE)
static void * nm_aligned_ptr(const void *p, nm_len_t a)
get a pointer aligned to given alignment a
static int nm_seq_fuzzy_compare(nm_seq_t seq1, nm_seq_t seq2)
compare two seq numbers taking into account looping at overflow, assuming they are less than a half trip apart.
PUK_LIST_TYPE(nm_core_pending_event, struct nm_core_event_s event;struct nm_core_monitor_s *p_core_monitor;)
a pending event, not dispatched immediately because it was received out of order
assert(p_data->ops.p_traversal !=NULL)
nm_seq_t seq
sequence number
Definition nm_headers.h:2
#define NM_FATAL(format,...)
Definition nm_log.h:36
#define NM_SEQ_MAX
largest sequence number
Definition nm_types.h:108
#define NM_SEQ_NONE
Reserved sequence number never used by real packets.
Definition nm_types.h:105
int32_t nm_prio_t
message priority
Definition nm_types.h:78
uint64_t nm_len_t
data length used by nmad
Definition nm_types.h:68
uint32_t nm_seq_t
Sequence number for packets on a given gate/tag.
Definition nm_types.h:102
uint8_t nm_proto_t
protocol flags- not part of the public API, but needed for inline
Definition nm_types.h:99
a chunk to be injected into nmad core
nm_injector_pull_data_t p_pull_data
function to call to actually get data
void * p_ref
user-supplied ref for the above function
an event ready for dispatch (matching already done)
struct nm_monitor_s * p_monitor
struct nm_core_event_s event
An event, generated by the NewMad core.
global monitor for status transitions
An internal tag.
asynchronous tasks for nmad core.
Connection to another process.
Definition nm_gate.h:104
an incoming chunk of data
nm_seq_t seq
sequence number
nm_core_tag_t tag
full tag
nm_len_t chunk_offset
offset of the chunk in the full message
nm_proto_t flags
proto flags associated with the chunk (NM_PROTO_FLAG_*)
nm_prio_t priority
priority of the incoming data (only for rdv)
nm_len_t chunk_len
length of the chunk itself
nm_gate_t p_gate
gate the chunk arrived from
int initializing
whether init is in progress
int init_done
whether init is already done
containers for matching info, used for caching
generic monitor, used for requests and for global events (with matching)
Internal packet wrapper.
a chunk of request
a generic pack/unpack request
a chunk of unexpected message to be stored
PUK_LIST_LINK(nm_unexpected_gate)
PUK_LIST_LINK(nm_unexpected_gtag)
link for list of unexpected per-tag
struct nm_core_task_s core_task
core task for unpack_next
struct nm_pkt_wrap_s * p_pw
pw this chunk arrived from; may be NULL if data is brought by injector
const union nm_header_generic_s * p_header
raw header in pw buffer
struct nm_matching_container_s matching
cache for matching containers
PUK_LIST_LINK(nm_unexpected_wildcard)
nm_len_t msg_len
length of full message on last chunk, NM_LEN_UNDEFINED if not last chunk
PUK_LIST_LINK(nm_unexpected_tag)
struct nm_chunk_injector_s injector
injector for this chunk
int matched
flag whether the unexpected is already matched (and delayed), and thus only enqueued in gtag matching...
struct nm_in_chunk_s chunk
metadata of the enclosed data chunk
a unified header for all types (ctrl/data)
Definition nm_headers.h:162