17#ifndef NM_CORE_INTERFACE_H
18#define NM_CORE_INTERFACE_H
26#include <Padico/Puk.h>
80static inline nm_core_t nm_core_get_singleton(
void)
97 nm_drv_t *pp_drv,
const char**p_url);
161#define NM_STATUS_NONE ((nm_status_t)0x00000000)
163#define NM_STATUS_PACK_INIT ((nm_status_t)0x00000001)
165#define NM_STATUS_UNPACK_INIT ((nm_status_t)0x00000002)
167#define NM_STATUS_PACK_COMPLETED ((nm_status_t)0x00000004)
169#define NM_STATUS_UNPACK_COMPLETED ((nm_status_t)0x00000008)
171#define NM_STATUS_UNEXPECTED ((nm_status_t)0x00000010)
173#define NM_STATUS_UNPACK_CANCELLED ((nm_status_t)0x00000020)
175#define NM_STATUS_PACK_POSTED ((nm_status_t)0x00000040)
177#define NM_STATUS_UNPACK_POSTED ((nm_status_t)0x00000080)
179#define NM_STATUS_ACK_RECEIVED ((nm_status_t)0x00000100)
181#define NM_STATUS_UNPACK_DATA0 ((nm_status_t)0x00000200)
183#define NM_STATUS_UNPACK_DATA_SIZE ((nm_status_t)0x00000400)
185#define NM_STATUS_FINALIZED ((nm_status_t)0x00000800)
187#define NM_STATUS_ERROR ((nm_status_t)0x00001000)
189#define NM_STATUS_PACK_MSG_SIZE ((nm_status_t)0x00002000)
191#define NM_STATUS_UNPACK_PREFETCHED ((nm_status_t)0x00004000)
194#define NM_STATUS_MASK_FULL ((nm_status_t)-1)
201#define NM_REQ_FLAG_NONE ((nm_req_flag_t)0x00000000)
203#define NM_REQ_FLAG_PACK_SYNCHRONOUS ((nm_req_flag_t)0x00001000)
205#define NM_REQ_FLAG_PACK ((nm_req_flag_t)0x00002000)
207#define NM_REQ_FLAG_UNPACK ((nm_req_flag_t)0x00004000)
209#define NM_REQ_FLAG_UNPACK_DATA_INFO ((nm_req_flag_t)0x00008000)
211#define NM_REQ_FLAG_UNPACK_MATCHING_INFO ((nm_req_flag_t)0x00010000)
213#define NM_REQ_FLAG_UNPACK_PREFETCHING ((nm_req_flag_t)0x00020000)
215#define NM_REQ_FLAG_MATCHING_WILDCARD ((nm_req_flag_t)0x00100000)
217#define NM_REQ_FLAG_MATCHING_GATE ((nm_req_flag_t)0x00200000)
219#define NM_REQ_FLAG_MATCHING_TAG ((nm_req_flag_t)0x00400000)
221#define NM_REQ_FLAG_MATCHING_FULL ((nm_req_flag_t)0x00800000)
223#define NM_REQ_FLAG_FINALIZE_LATER ((nm_req_flag_t)0x01000000)
225#define NM_REQ_FLAG_UNPACK_PARTITIONED ((nm_req_flag_t)0x02000000)
227#define NM_REQ_FLAG_PACK_PARTITIONED ((nm_req_flag_t)0x04000000)
233#define NM_REQ_CHUNK_FLAG_NONE ((nm_req_chunk_flag_t)0x00000000)
235#define NM_REQ_CHUNK_FLAG_SHORT ((nm_req_chunk_flag_t)0x00020000)
237#define NM_REQ_CHUNK_FLAG_USE_COPY ((nm_req_chunk_flag_t)0x00080000)
239#define NM_REQ_CHUNK_FLAG_DATA_ITERATOR ((nm_req_chunk_flag_t)0x00100000)
257#define NM_CORE_TAG_HASH_FULL ((nm_session_hash_t)0xFFFFFFFF)
268#define NM_CORE_TAG_MASK_FULL ((nm_core_tag_t){ .tag = NM_TAG_MASK_FULL, .hashcode = NM_CORE_TAG_HASH_FULL })
269#define NM_CORE_TAG_NONE ((nm_core_tag_t){ .tag = 0, .hashcode = 0x0 })
345#define NM_EVENT_MATCHING_ANY ((struct nm_core_event_matching_s){ .p_gate = NM_ANY_GATE, .tag = NM_CORE_TAG_NONE, .tag_mask = NM_CORE_TAG_NONE })
347#define NM_MONITOR_NULL ((struct nm_monitor_s){ .p_notifier = NULL, .event_mask = 0, .ref = NULL })
349#define NM_CORE_MONITOR_NULL ((struct nm_core_monitor_s){ .monitor = NM_MONITOR_NULL, .matching = NM_EVENT_MATCHING_ANY })
384#define NM_MATCHING_CONTAINER_NULL ((struct nm_matching_container_s) { NULL })
508 struct nm_req_pchunk_s
667 piom_cond_init(p_cond, bitmask);
672 piom_cond_destroy(p_cond);
677 return piom_cond_test(p_cond, bitmask);
687 piom_cond_add(p_cond, bitmask);
692 piom_cond_mask(p_cond, bitmask);
697 piom_cond_wait(p_cond, bitmask);
702 piom_cond_signal(p_cond, bitmask);
707 piom_cond_wait_all(pp_conds, n, offset, bitmask);
722 return ((*p_cond) & bitmask);
756 for(i = 0; i < n; i++)
758 if(pp_conds[i] != NULL)
825 const uintptr_t status_offset = (uintptr_t)&p_req->
status - (uintptr_t)p_req;
860 __sync_synchronize();
866#if defined(PIOMAN_MULTITHREAD)
867 __sync_synchronize();
869 nm_core_t p_core = nm_core_get_singleton();
872 __sync_synchronize();
880#if defined(PIOMAN_MULTITHREAD)
881 return __sync_fetch_and_add(v, 1);
883 nm_core_t p_core = nm_core_get_singleton();
886 return __sync_fetch_and_add(v, 1);
898 return __sync_fetch_and_add(v, 1);
904#if defined(PIOMAN_MULTITHREAD)
905 return __sync_sub_and_fetch(v, 1);
907 nm_core_t p_core = nm_core_get_singleton();
910 return __sync_sub_and_fetch(v, 1);
922 return __sync_sub_and_fetch(v, 1);
928#if defined(PIOMAN_MULTITHREAD)
929 __sync_fetch_and_add(v, v2);
931 nm_core_t p_core = nm_core_get_singleton();
934 __sync_fetch_and_add(v, v2);
946 __sync_fetch_and_add(v, v2);
952#if defined(PIOMAN_MULTITHREAD)
953 return __sync_bool_compare_and_swap(v, oldval, newval);
955 nm_core_t p_core = nm_core_get_singleton();
958 return __sync_bool_compare_and_swap(v, oldval, newval);
978 return __sync_bool_compare_and_swap(v, oldval, newval);
1041#if defined(NMAD_DEBUG) && !defined(PIOMAN)
1042 nm_core_t p_core = nm_core_get_singleton();
1044 if(p_spin->last_tid == (pthread_t)0)
1046 p_spin->last_tid = pthread_self();
1047 __sync_synchronize();
1051 if(p_spin->last_tid != pthread_self())
1053 NM_FATAL(
"detected calls from multiple threads in non-threaded mode. Please use pioman-enabled build for multi-threaded use or give thread level using nm_core_set_thread_level(NM_THREAD_SERIALIZED) for serialized thread level.");
1062#if defined(NMAD_DEBUG) && !defined(PIOMAN)
1063 nm_core_t p_core = nm_core_get_singleton();
1065 if(p_spin->last_tid == 0)
1067 NM_FATAL(
"unlocking while no thread is holding the lock.");
1069 else if(p_spin->last_tid != pthread_self())
1071 NM_WARN(
"unlocking from another thread than where lock was acquired.\n");
1075 p_spin->last_tid = (pthread_t)0;
1083 piom_spin_init(p_spin);
1087 p_spin->last_tid = 0;
1095 piom_spin_destroy(p_spin);
1098 assert(p_spin->lock == 0);
1106 piom_spin_lock(p_spin);
1109 __sync_synchronize();
1110 if(p_spin->lock != 0)
1112 NM_FATAL(
"spinlock is not free in nm_spin_lock(); detected concurrent access from thread = %p. Suspecting multi-threaded use by the application while library is initialized in non-threaded mode.\n",
1113 (
void*)p_spin->last_tid);
1118 nm_core_t p_core = nm_core_get_singleton();
1121 __sync_synchronize();
1129 piom_spin_unlock(p_spin);
1133 __sync_synchronize();
1134 assert(p_spin->lock == 1);
1137 nm_core_t p_core = nm_core_get_singleton();
1140 __sync_synchronize();
1148 return piom_spin_trylock(p_spin);
1152 __sync_synchronize();
1155 assert(p_spin->lock == 1);
1165 nm_core_t p_core = nm_core_get_singleton();
1168 __sync_synchronize();
1177 piom_spin_assert_locked(p_spin);
1180 assert(p_spin->lock == 1);
1181 assert(p_spin->last_tid == pthread_self());
1189 piom_spin_assert_notlocked(p_spin);
1192 assert(p_spin->lock == 0);
static int nm_atomic_compare_and_swap(int *v, int oldval, int newval)
boolean int compare and swap, atomic only when multithread
static void nm_atomic_always_add(int *v, int v2)
int add, always atomic
static int nm_atomic_dec(int *v)
decrement int, atomic only when multithread
static int nm_atomic_always_dec(int *v)
decrement int, always atomic
static int nm_atomic_inc(int *v)
increment int, atomic only when multithread
static void nm_mem_fence_always(void)
memory fence, always
static int nm_atomic_always_compare_and_swap(int *v, int oldval, int newval)
boolean int compare and swap, always atomic
static void nm_mem_fence(void)
memory fence only when multithread
static void nm_atomic_add(int *v, int v2)
int add, atomic only when multithread
static int nm_atomic_alway_inc(int *v)
increment int, always atomic
static void nm_cond_init(nm_cond_status_t *p_cond, nm_status_t bitmask)
initialize a nm_cond_status_t object
static nm_status_t nm_cond_test(const nm_cond_status_t *p_cond, nm_status_t bitmask)
test whether the given bit is set in the status; unlocked, weak consistency
static void nm_cond_add(nm_cond_status_t *p_cond, nm_status_t bitmask)
add a bit to the bitmask in the status, do not unlock waiters (for bits that will not be waited for)
static void nm_cond_wait_all(void **pp_conds, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
wait on multiple statuses at the same time
static void nm_cond_signal(nm_cond_status_t *p_cond, nm_status_t bitmask)
add a bit and wake up threads waiting for it
static nm_status_t nm_cond_test_locked(const nm_cond_status_t *p_cond, nm_status_t bitmask)
test whether the given bit is set in the status; locked, guaranteed consistency, slower
static void nm_cond_wait(nm_cond_status_t *p_cond, nm_status_t bitmask, nm_core_t p_core)
wait for the given bit to be set in the status; do active polling while waiting
static void nm_cond_mask(nm_cond_status_t *p_cond, nm_status_t bitmask)
static void nm_cond_destroy(nm_cond_status_t *p_cond)
free resources associated with a nm_cond_status_t object
void nm_core_monitor_add(nm_core_t p_core, struct nm_core_monitor_s *m)
Register an event monitor.
void(* nm_core_event_notifier_t)(const struct nm_core_event_s *const event, void *ref)
an event notifier, fired upon status transition
struct nm_core_event_s __attribute__
void nm_core_req_monitor(struct nm_core *p_core, struct nm_req_s *p_req, struct nm_monitor_s monitor)
set a per-request monitor.
void nm_core_monitor_remove(nm_core_t p_core, struct nm_core_monitor_s *m)
Unregister an event monitor.
int nm_core_driver_load_init(nm_core_t p_core, puk_component_t driver, nm_trk_kind_t kind, nm_drv_t *pp_drv, const char **p_url)
puk_component_t nm_core_component_load(const char *entity, const char *name)
void nm_core_schedopt_disable(nm_core_t p_core)
disable schedopt for raw driver use
struct nm_core * nm_core_t
int nm_schedule(nm_core_t p_core)
struct nm_drv_s * nm_drv_t
a nmad driver; opaque type for the user
struct nm_core_internal_s nm_core_internal
int nm_core_exit(nm_core_t p_core)
nm_thread_level_t nm_core_get_thread_level(nm_core_t)
Get the current thread level.
enum nm_thread_level_e nm_thread_level_t
int nm_core_set_strategy(nm_core_t p_core, puk_component_t strategy)
PUK_VECT_TYPE(nm_drv, nm_drv_t)
nm_gate_t nm_core_gate_new(nm_core_t p_core, nm_drv_vect_t *p_drvs)
Init a new gate, using the given set of drivers.
void nm_trace_add_synchro_point(void)
generate a synchronization event to synchronize nmad traces with others (e.g.
void nm_core_set_thread_level(nm_thread_level_t)
Sets the thread level before nm core init.
int nm_core_init(nm_core_t *pp_core)
void nm_core_gate_connect_wait(nm_core_t p_core, struct nm_trk_s *p_trk)
wait for connection completion
void nm_core_gate_connect_async(nm_core_t p_core, nm_gate_t gate, nm_drv_t p_drv, nm_trk_id_t trk_id, const char *url)
start connection process on given gate/trk
int nm_core_unpack_iprobe(struct nm_core *p_core, struct nm_req_s *p_unpack)
probes whether an incoming packet matched this unposted request.
void nm_core_inject_finalize(struct nm_core *p_core, struct nm_req_s *p_req)
finalize an injected request that was only completed.
int nm_core_unpack_cancel(struct nm_core *p_core, struct nm_req_s *p_unpack)
cancel a pending unpack
void nm_core_pack_data(nm_core_t p_core, struct nm_req_s *p_pack, const struct nm_data_s *p_data)
build a pack request from data descriptor
PUK_LIST_DECLARE_TYPE(nm_req_chunk)
void nm_core_unpack_init(struct nm_core *p_core, struct nm_req_s *p_unpack)
initializes an empty unpack request
int nm_core_unpack_peek(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_data_s *p_data, nm_len_t peek_offset, nm_len_t peek_len)
peeks unexpected data without consuming it.
int nm_core_unpack_partition_test(struct nm_req_s *p_unpack, int partition)
void nm_core_inject_chunk(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq, nm_len_t chunk_offset, nm_len_t chunk_len, int is_last_chunk, nm_injector_pull_data_t p_pull_data, void *p_ref)
inject a packet in nmad core as if it arrived from network.
int nm_core_iprobe(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag, nm_core_tag_t tag_mask, nm_gate_t *pp_out_gate, nm_core_tag_t *p_out_tag, nm_len_t *p_out_size)
probe unexpected packet, check matching for (packet_tag & tag_mask) == tag
void nm_core_pack_send(struct nm_core *p_core, struct nm_req_s *p_pack, nm_core_tag_t tag, nm_gate_t p_gate, nm_req_flag_t flags)
set tag/gate/flags for pack request
void(* nm_injector_pull_data_t)(struct nm_req_s *p_req, const struct nm_data_s *p_data, nm_len_t chunk_offset, nm_len_t chunk_len, void *p_ref)
user-supplied function called to pull data to posted request through nmad core p_req is the user requ...
void nm_core_unpack_offset(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_len_t offset)
set an offset on data; data before offset will be discarded
void nm_core_unpack_match_recv(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_gate_t p_gate, nm_core_tag_t tag, nm_core_tag_t tag_mask)
match an unpack request with given gate/tag, next sequence number assumed
void nm_core_pack_init(struct nm_core *p_core, struct nm_req_s *p_pack)
initializes an empty pack request
void nm_core_inject_complete_finalize(struct nm_core *p_core, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
notify data was injected in a matched request and finalize this request.
void nm_core_unpack_partition_set(struct nm_req_s *p_unpack, int n_partitions)
void nm_core_pack_submit(struct nm_core *p_core, struct nm_req_s *p_pack)
post a pack request
void nm_core_inject_complete(struct nm_core *p_core, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
notify data was injected in a matched request, but do not finalize the request (the status will be NM...
void nm_core_unpack_data(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_data_s *p_data)
build an unpack request from data descriptor
void nm_core_flush(struct nm_core *p_core)
Flush pending packs (if supported by the strategy).
static void nm_core_pack_set_hlen(struct nm_core *p_core __attribute__((unused)), struct nm_req_s *p_pack, nm_len_t hlen)
set a header length for the given pack request
void nm_core_unpack_match_event(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_core_event_s *p_event)
match an unpack request with a packet that triggered an event
void nm_core_unpack_partition_free(struct nm_req_s *p_unpack)
void nm_core_unpack_submit(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_req_flag_t flags)
submit an unpack request
void nm_core_pack_set_priority(struct nm_core *p_core, struct nm_req_s *p_pack, nm_prio_t priority)
set a priority for the given pack request
nm_seq_t nm_core_send_seq_get(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag)
get a seq number in the out stream, to route packet outside of nmad core
void nm_core_pack_submit_chunks(struct nm_core *p_core, struct nm_req_s *p_pack, int n, const struct nm_chunk_s *p_chunks)
static nm_session_hash_t nm_core_tag_get_hashcode(nm_core_tag_t core_tag)
uint32_t nm_session_hash_t
a session hashcode in tags, used to multiplex sessions
static nm_core_tag_t nm_core_tag_build(nm_session_hash_t hashcode, nm_tag_t tag)
static nm_tag_t nm_core_tag_get_tag(nm_core_tag_t core_tag)
enum nm_core_task_kind_e nm_core_task_kind_t
void nm_core_task_submit_unlocked(struct nm_core *p_core, void(*p_handler)(void))
submit task lock-free to the submission list. This is used mostly for benchmarks.
void nm_core_task_submit_locked(struct nm_core *p_core, void(*p_handler)(void))
lock then submit task to the pending list. This is used mostly for benchmarks.
@ NM_CORE_TASK_COMPLETED_PREFETCH
prefetch completed; process RTR if received
@ NM_CORE_TASK_UNPACK_NEXT
try to match the next unpack on the given gate/tag/gtag
@ NM_CORE_TASK_RTR_SEND
send a RTR once the large pw for recv has been posted
@ NM_CORE_TASK_PACK_SUBMISSION
process a submitted pack request
@ NM_CORE_TASK_COMPLETED_PW
process a completed pw
@ NM_CORE_TASK_HANDLER
call a user handler, mainly for testing/benchmarking
static void nm_spin_check_nothread(nm_spinlock_t *p_spin __attribute__((unused)))
check that we are always called from the same thread in case of non-threaded mode
static void nm_spin_init(nm_spinlock_t *p_spin)
init the spin lock
static void nm_spin_assert_notlocked(nm_spinlock_t *p_spin)
assert that current thread doesn't hold the lock
static int nm_spin_trylock(nm_spinlock_t *p_spin)
try to lock the spin lock return 1 if lock is successfully acquired, 0 otherwise
static void nm_spin_clear_nothread(nm_spinlock_t *p_spin __attribute__((unused)))
clear the last_tid tracking for lock consistency checking
static void nm_spin_destroy(nm_spinlock_t *p_spin)
destroy the spin lock
struct nm_spinlock_s nm_spinlock_t
static void nm_spin_lock(nm_spinlock_t *p_spin)
acquire the spin lock
static void nm_spin_assert_locked(nm_spinlock_t *p_spin)
assert that current thread holds the lock
static void nm_spin_unlock(nm_spinlock_t *p_spin)
release the spin lock
static void nm_status_signal(struct nm_req_s *p_req, nm_status_t bitmask)
add the bits from bitmask to the status and wakes up all others waiting on nm_status_wait().
static void nm_status_wait_all(void **pp_reqs, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
wait for all reqs, any bit in bitmask
static void nm_status_unset(struct nm_req_s *p_req, nm_status_t bitmask)
remove bits of bitmask from req status
static void nm_status_spinwait(struct nm_req_s *p_req, nm_status_t status)
static void nm_status_init(struct nm_req_s *p_req, nm_status_t bitmask)
initialize cond status with given initial value
static int nm_status_test_allbits(struct nm_req_s *p_req, nm_status_t bitmask)
tests for all given bits in status
static void nm_status_destroy(struct nm_req_s *p_req)
static void nm_status_add(struct nm_req_s *p_req, nm_status_t bitmask)
add a bit to the status of the request; does not unlock others (no signal)
static void nm_status_wait(struct nm_req_s *p_req, nm_status_t bitmask, nm_core_t p_core)
wait for any bit matching in req status
static nm_status_t nm_status_test(const struct nm_req_s *p_req, nm_status_t bitmask)
query for given bits in req status; returns matched bits
static void nm_status_assert(struct nm_req_s *p_req __attribute__((unused)), nm_status_t value __attribute__((unused)))
nm_status_t nm_cond_status_t
status with synchronization (wait/signal)
uint32_t nm_req_flag_t
pack/unpack flags
#define NM_STATUS_FINALIZED
request is finalized, may be freed
#define NM_STATUS_MASK_FULL
mask to catch all bits of status
uint32_t nm_status_t
status bits of pack/unpack requests
uint32_t nm_req_chunk_flag_t
flags for req_chunk
nm_session_hash_t hashcode
the session hashcode
nm_tag_t tag
the user-supplied tag
Basic primitives to display info & warnings.
#define NM_FATAL(format,...)
#define NM_WARN(format,...)
nm_gate_t gate
gate of the destination or the source node
nm_mpi_status_t status
status of request
This is the common public header for NewMad.
int8_t nm_trk_id_t
ID of a track, assigned in order.
uint64_t nm_req_seq_t
sequence number for requests
int32_t nm_prio_t
message priority
uint64_t nm_tag_t
user tags, 64 bits, contained in indirect hashtable
uint64_t nm_len_t
data length used by nmad
uint32_t nm_seq_t
Sequence number for packets on a given gate/tag.
uint8_t nm_proto_t
protocol flags- not part of the public API, but needed for inline
enum nm_trk_kind_e nm_trk_kind_t
matching info for global monitors
nm_core_tag_t tag_mask
the mask to apply before comparing tags (only bits set in mask will be checked)
nm_core_tag_t tag
the tag to listen to
nm_gate_t p_gate
the gate to listen to, or NM_ANY_GATE for any
An event, generated by the NewMad core.
struct nm_req_s * p_req
the request that matched the event- NULL in case of unexpected packets
nm_status_t status
status flags- describe the event
exposed here for inlining; do not use this value, use the accessor nm_core_get_singleton()
nm_core_t p_core_singleton
global monitor for status transitions
struct nm_monitor_s monitor
the monitor to fire upon matching event
struct nm_core_event_matching_s matching
packet matching information
nm_tag_t tag
the user-supplied tag
nm_session_hash_t hashcode
the session hashcode
asynchronous tasks for nmad core.
struct nm_core_task_s::@10::@11 unpack_next
struct nm_req_s * p_unpack
struct nm_core_task_s::@10::@12 completed_pw
struct nm_core_task_s::@10::@16 handler
struct nm_core_task_s::@10::@14 pack_submission
struct nm_core_task_s::@10::@13 completed_prefetch
union nm_core_task_s::@10 content
enum nm_core_task_kind_e kind
struct nm_pkt_wrap_s * p_pw
struct nm_core_task_s::@10::@15 rtr_send
struct nm_req_chunk_s * p_req_chunk
struct nm_unexpected_s * p_unexpected
Core NewMadeleine structure.
block of static properties for a given data descriptor
a data descriptor, used to pack/unpack data from app layout to/from contiguous buffers
const char * url
driver url, as string
const struct nm_minidriver_iface_s * driver
Driver interface, for use when no instance is needed.
Connection to another process.
status of tags on each gate
containers for matching info, used for caching
struct nm_gtag_s * p_gtag
cache of gtag
struct nm_matching_gsession_s * p_gsession
cache of matching gsession
struct nm_matching_wildcard_s * p_wildcard
cache of matching wildcard
struct nm_matching_tag_s * p_matching_tag
cache of matching tag
struct to store matching info for any-source requests of a given tag
struct to store matching info for wildcard requests, one per session
generic monitor, used for requests and for global events (with matching)
nm_status_t event_mask
mask applied to status to check whether to fire events
nm_core_event_notifier_t p_notifier
notification function called to fire events
void * ref
opaque user-supplied pointer passed to notifier
PUK_LIST_LINK(nm_req_chunk)
nm_len_t chunk_offset
offset of the chunk relative to the full data in the req
struct nm_core_task_s core_task
nm_proto_t proto_flags
pre-computed proto flags
struct nm_data_properties_s chunk_props
properties of the data chunk
nm_len_t chunk_len
length of the chunk
struct nm_req_s * p_req
link to insert the req chunk as a core task
a generic pack/unpack request
nm_core_tag_t tag
tag to send to/from (works in combination with tag_mask for recv)
nm_len_t expected_len
length of posted recv (may be updated if matched packet is shorter)
PUK_LIST_LINK(nm_req)
link to enqueue req in pending requests lists
nm_gate_t p_gate
dest/src gate; NULL if recv from any source
struct nm_matching_container_s matching
link to store request in a matching map
nm_len_t done
cumulated length of data sent so far
nm_prio_t priority
request priority level
uint32_t checksum
data checksum when pack was submitted - for debug only
struct nm_req_pchunk_s * p_next
struct nm_gtag_s * p_gtag
cache for tag status on gate; NULL if tag or gate is unspecified yet
nm_len_t offset
offset of data partially received
nm_cond_status_t status
status, including status bits and synchronization
nm_req_seq_t req_seq
request sequence number used to interleave wildcard/non-wildcard requests
struct nm_req_chunk_s req_chunk
preallocated chunk for the common case (single-chunk)
struct nm_req_s::@17::@20::@21 partition
partitioned unpack, used only if NM_REQ_FLAG_UNPACK_PARTITIONED is set
nm_req_flag_t flags
flags given by user
struct nm_data_s data
data descriptor to send/recv
struct nm_req_s::@17::@19 pack
struct nm_pkt_wrap_s * p_prefetch_pw
packet wrapper to prefetch recv
nm_core_tag_t tag_mask
mask applied to tag for matching (only bits in mask need to match)
struct nm_req_s::@17::@20 unpack
struct nm_req_s::@17::@20::@21::nm_req_pchunk_s * p_pchunks
unsorted list of arrived chunks; reads are lock-free, writes are within core_core_lock sections
nm_len_t cumulated_len
amount of data unpacked so far
nm_len_t len
cumulated data length
int err
error status of the request
struct nm_monitor_s monitor
monitor attached to this request (only 1)
nm_seq_t seq
packet sequence number on the given tag
nm_len_t hlen
length of header to send eagerly
a chunk of unexpected message to be stored