17#ifndef NM_CORE_INTERFACE_H
18#define NM_CORE_INTERFACE_H
24#include <Padico/Puk.h>
68static inline nm_core_t nm_core_get_singleton(
void)
82 nm_drv_t *pp_drv,
const char**p_url);
136#define NM_STATUS_NONE ((nm_status_t)0x00000000)
138#define NM_STATUS_PACK_INIT ((nm_status_t)0x00000001)
140#define NM_STATUS_UNPACK_INIT ((nm_status_t)0x00000002)
142#define NM_STATUS_PACK_COMPLETED ((nm_status_t)0x00000004)
144#define NM_STATUS_UNPACK_COMPLETED ((nm_status_t)0x00000008)
146#define NM_STATUS_UNEXPECTED ((nm_status_t)0x00000010)
148#define NM_STATUS_UNPACK_CANCELLED ((nm_status_t)0x00000020)
150#define NM_STATUS_PACK_POSTED ((nm_status_t)0x00000040)
152#define NM_STATUS_UNPACK_POSTED ((nm_status_t)0x00000080)
154#define NM_STATUS_ACK_RECEIVED ((nm_status_t)0x00000100)
156#define NM_STATUS_UNPACK_DATA0 ((nm_status_t)0x00000200)
158#define NM_STATUS_UNPACK_DATA_SIZE ((nm_status_t)0x00000400)
160#define NM_STATUS_FINALIZED ((nm_status_t)0x00000800)
162#define NM_STATUS_ERROR ((nm_status_t)0x00001000)
164#define NM_STATUS_PACK_MSG_SIZE ((nm_status_t)0x00002000)
166#define NM_STATUS_UNPACK_PREFETCHED ((nm_status_t)0x00004000)
169#define NM_STATUS_MASK_FULL ((nm_status_t)-1)
176#define NM_REQ_FLAG_NONE ((nm_req_flag_t)0x00000000)
178#define NM_REQ_FLAG_PACK_SYNCHRONOUS ((nm_req_flag_t)0x00001000)
180#define NM_REQ_FLAG_PACK ((nm_req_flag_t)0x00002000)
182#define NM_REQ_FLAG_UNPACK ((nm_req_flag_t)0x00004000)
184#define NM_REQ_FLAG_UNPACK_DATA_INFO ((nm_req_flag_t)0x00008000)
186#define NM_REQ_FLAG_UNPACK_MATCHING_INFO ((nm_req_flag_t)0x00010000)
188#define NM_REQ_FLAG_UNPACK_PREFETCHING ((nm_req_flag_t)0x00020000)
190#define NM_REQ_FLAG_MATCHING_WILDCARD ((nm_req_flag_t)0x00100000)
192#define NM_REQ_FLAG_MATCHING_GATE ((nm_req_flag_t)0x00200000)
194#define NM_REQ_FLAG_MATCHING_TAG ((nm_req_flag_t)0x00400000)
196#define NM_REQ_FLAG_MATCHING_FULL ((nm_req_flag_t)0x00800000)
198#define NM_REQ_FLAG_FINALIZE_LATER ((nm_req_flag_t)0x01000000)
200#define NM_REQ_FLAG_UNPACK_PARTITIONED ((nm_req_flag_t)0x02000000)
202#define NM_REQ_FLAG_PACK_PARTITIONED ((nm_req_flag_t)0x04000000)
208#define NM_REQ_CHUNK_FLAG_NONE ((nm_req_chunk_flag_t)0x00000000)
210#define NM_REQ_CHUNK_FLAG_SHORT ((nm_req_chunk_flag_t)0x00020000)
212#define NM_REQ_CHUNK_FLAG_USE_COPY ((nm_req_chunk_flag_t)0x00080000)
214#define NM_REQ_CHUNK_FLAG_DATA_ITERATOR ((nm_req_chunk_flag_t)0x00100000)
223#define NM_CORE_TAG_HASH_FULL ((nm_session_hash_t)0xFFFFFFFF)
234#define NM_CORE_TAG_MASK_FULL ((nm_core_tag_t){ .tag = NM_TAG_MASK_FULL, .hashcode = NM_CORE_TAG_HASH_FULL })
235#define NM_CORE_TAG_NONE ((nm_core_tag_t){ .tag = 0, .hashcode = 0x0 })
301#define NM_EVENT_MATCHING_ANY ((struct nm_core_event_matching_s){ .p_gate = NM_ANY_GATE, .tag = NM_CORE_TAG_NONE, .tag_mask = NM_CORE_TAG_NONE })
303#define NM_MONITOR_NULL ((struct nm_monitor_s){ .p_notifier = NULL, .event_mask = 0, .ref = NULL })
305#define NM_CORE_MONITOR_NULL ((struct nm_core_monitor_s){ .monitor = NM_MONITOR_NULL, .matching = NM_EVENT_MATCHING_ANY })
324#define NM_MATCHING_CONTAINER_NULL ((struct nm_matching_container_s) { NULL })
432 struct nm_req_pchunk_s
598 piom_cond_init(p_cond, bitmask);
603 piom_cond_destroy(p_cond);
608 return piom_cond_test(p_cond, bitmask);
618 piom_cond_add(p_cond, bitmask);
623 piom_cond_mask(p_cond, bitmask);
628 piom_cond_wait(p_cond, bitmask);
633 piom_cond_signal(p_cond, bitmask);
638 piom_cond_wait_all(pp_conds, n, offset, bitmask);
653 return ((*p_cond) & bitmask);
687 for(i = 0; i < n; i++)
689 if(pp_conds[i] != NULL)
741 const uintptr_t status_offset = (uintptr_t)&p_req->
status - (uintptr_t)p_req;
765 __sync_synchronize();
771#if defined(PIOMAN_MULTITHREAD)
772 __sync_synchronize();
774 nm_core_t p_core = nm_core_get_singleton();
777 __sync_synchronize();
785#if defined(PIOMAN_MULTITHREAD)
786 return __sync_fetch_and_add(v, 1);
788 nm_core_t p_core = nm_core_get_singleton();
791 return __sync_fetch_and_add(v, 1);
803 return __sync_fetch_and_add(v, 1);
809#if defined(PIOMAN_MULTITHREAD)
810 return __sync_sub_and_fetch(v, 1);
812 nm_core_t p_core = nm_core_get_singleton();
815 return __sync_sub_and_fetch(v, 1);
827 return __sync_sub_and_fetch(v, 1);
833#if defined(PIOMAN_MULTITHREAD)
834 __sync_fetch_and_add(v, v2);
836 nm_core_t p_core = nm_core_get_singleton();
839 __sync_fetch_and_add(v, v2);
851 __sync_fetch_and_add(v, v2);
857#if defined(PIOMAN_MULTITHREAD)
858 return __sync_bool_compare_and_swap(v, oldval, newval);
860 nm_core_t p_core = nm_core_get_singleton();
863 return __sync_bool_compare_and_swap(v, oldval, newval);
883 return __sync_bool_compare_and_swap(v, oldval, newval);
939#if defined(NMAD_DEBUG) && !defined(PIOMAN)
940 nm_core_t p_core = nm_core_get_singleton();
942 if(p_spin->last_tid == (pthread_t)0)
944 p_spin->last_tid = pthread_self();
945 __sync_synchronize();
949 if(p_spin->last_tid != pthread_self())
951 NM_FATAL(
"detected calls from multiple threads in non-threaded mode. Please use pioman-enabled build for multi-threaded use or give thread level using nm_core_set_thread_level(NM_THREAD_SERIALIZED) for serialized thread level.");
960#if defined(NMAD_DEBUG) && !defined(PIOMAN)
961 nm_core_t p_core = nm_core_get_singleton();
963 if(p_spin->last_tid == 0)
965 NM_FATAL(
"unlocking while no thread is holding the lock.");
967 else if(p_spin->last_tid != pthread_self())
969 NM_WARN(
"unlocking from another thread than where lock was acquired.\n");
973 p_spin->last_tid = (pthread_t)0;
981 piom_spin_init(p_spin);
985 p_spin->last_tid = 0;
993 piom_spin_destroy(p_spin);
996 assert(p_spin->lock == 0);
1004 piom_spin_lock(p_spin);
1007 __sync_synchronize();
1008 if(p_spin->lock != 0)
1010 NM_FATAL(
"spinlock is not free in nm_spin_lock(); detected concurrent access from thread = %p. Suspecting multi-threaded use by the application while library is initialized in non-threaded mode.\n",
1011 (
void*)p_spin->last_tid);
1016 nm_core_t p_core = nm_core_get_singleton();
1019 __sync_synchronize();
1027 piom_spin_unlock(p_spin);
1031 __sync_synchronize();
1032 assert(p_spin->lock == 1);
1035 nm_core_t p_core = nm_core_get_singleton();
1038 __sync_synchronize();
1046 return piom_spin_trylock(p_spin);
1050 __sync_synchronize();
1053 assert(p_spin->lock == 1);
1063 nm_core_t p_core = nm_core_get_singleton();
1066 __sync_synchronize();
1075 piom_spin_assert_locked(p_spin);
1078 assert(p_spin->lock == 1);
1079 assert(p_spin->last_tid == pthread_self());
1087 piom_spin_assert_notlocked(p_spin);
1090 assert(p_spin->lock == 0);
int nm_core_driver_load_init(nm_core_t p_core, puk_component_t driver, nm_trk_kind_t kind, nm_drv_t *pp_drv, const char **p_url)
int nm_core_unpack_iprobe(struct nm_core *p_core, struct nm_req_s *p_unpack)
probes whether an incoming packet matched this unposted request.
void nm_core_inject_finalize(struct nm_core *p_core, struct nm_req_s *p_req)
finalize an injected request that was only completed.
int nm_core_unpack_cancel(struct nm_core *p_core, struct nm_req_s *p_unpack)
cancel a pending unpack
void nm_core_pack_data(nm_core_t p_core, struct nm_req_s *p_pack, const struct nm_data_s *p_data)
build a pack request from data descriptor
PUK_LIST_DECLARE_TYPE(nm_req_chunk)
void nm_core_unpack_init(struct nm_core *p_core, struct nm_req_s *p_unpack)
initializes an empty unpack request
static int nm_atomic_compare_and_swap(int *v, int oldval, int newval)
boolean int compare and swap, atomic only when multithread
puk_component_t nm_core_component_load(const char *entity, const char *name)
int nm_core_unpack_peek(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_data_s *p_data, nm_len_t peek_offset, nm_len_t peek_len)
peeks unexpected data without consuming it.
void nm_core_monitor_add(nm_core_t p_core, struct nm_core_monitor_s *m)
Register an event monitor.
void nm_core_schedopt_disable(nm_core_t p_core)
disable schedopt for raw driver use
int nm_core_unpack_partition_test(struct nm_req_s *p_unpack, int partition)
void nm_core_inject_chunk(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq, nm_len_t chunk_offset, nm_len_t chunk_len, int is_last_chunk, nm_injector_pull_data_t p_pull_data, void *p_ref)
inject a packet in nmad core as if it arrived from network.
int nm_core_iprobe(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag, nm_core_tag_t tag_mask, nm_gate_t *pp_out_gate, nm_core_tag_t *p_out_tag, nm_len_t *p_out_size)
probe unexpected packet, check matching for (packet_tag & tag_mask) == tag
void nm_core_pack_send(struct nm_core *p_core, struct nm_req_s *p_pack, nm_core_tag_t tag, nm_gate_t p_gate, nm_req_flag_t flags)
set tag/gate/flags for pack request
nm_status_t nm_cond_status_t
status with synchronization (wait/signal)
static void nm_atomic_always_add(int *v, int v2)
int add, always atomic
void(* nm_injector_pull_data_t)(struct nm_req_s *p_req, const struct nm_data_s *p_data, nm_len_t chunk_offset, nm_len_t chunk_len, void *p_ref)
user-supplied function called to pull data to posted request through nmad core p_req is the user requ...
struct nm_core * nm_core_t
int nm_schedule(nm_core_t p_core)
static void nm_status_signal(struct nm_req_s *p_req, nm_status_t bitmask)
void nm_core_unpack_offset(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_len_t offset)
set an offset on data; data before offset will be discarded
uint32_t nm_req_flag_t
pack/unpack flags
#define NM_STATUS_FINALIZED
request is finalized, may be freed
static void nm_spin_check_nothread(nm_spinlock_t *p_spin __attribute__((unused)))
check that we are always called from the same thread in case of non-threaded mode
void nm_core_unpack_match_recv(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_gate_t p_gate, nm_core_tag_t tag, nm_core_tag_t tag_mask)
match an unpack request with given gate/tag, next sequence number assumed
static int nm_atomic_dec(int *v)
decrement int, atomic only when multithread
static void nm_spin_init(nm_spinlock_t *p_spin)
init the spin lock
void(* nm_core_event_notifier_t)(const struct nm_core_event_s *const event, void *ref)
an event notifier, fired upon status transition
void nm_core_pack_init(struct nm_core *p_core, struct nm_req_s *p_pack)
initializes an empty pack request
void nm_core_inject_complete_finalize(struct nm_core *p_core, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
notify data was injected in a matched request and finalize this request.
static nm_session_hash_t nm_core_tag_get_hashcode(nm_core_tag_t core_tag)
#define NM_STATUS_MASK_FULL
mask to catch all bits of status
static void nm_spin_assert_notlocked(nm_spinlock_t *p_spin)
assert that current thread doesn't hold the lock
void nm_core_unpack_partition_set(struct nm_req_s *p_unpack, int n_partitions)
struct nm_core_event_s __attribute__
void nm_core_pack_submit(struct nm_core *p_core, struct nm_req_s *p_pack)
post a pack request
void nm_core_inject_complete(struct nm_core *p_core, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
notify data was injected in a matched request, but do not finalize the request (the status will be NM...
struct nm_drv_s * nm_drv_t
a nmad driver; opaque type for the user
struct nm_core_internal_s nm_core_internal
static void nm_status_wait_all(void **pp_reqs, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
wait for all reqs, any bit in bitmask
int nm_core_exit(nm_core_t p_core)
uint32_t nm_session_hash_t
a session hashcode in tags, used to multiplex sessions
void nm_core_unpack_data(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_data_s *p_data)
build an unpack request from data descriptor
enum nm_core_task_kind_e nm_core_task_kind_t
static void nm_status_unset(struct nm_req_s *p_req, nm_status_t bitmask)
remove bits of bitmask from req status
nm_thread_level_t nm_core_get_thread_level(nm_core_t)
Get the current thread level.
static int nm_spin_trylock(nm_spinlock_t *p_spin)
try to lock the spin lock return 1 if lock is successfully acquired, 0 otherwise
static void nm_spin_clear_nothread(nm_spinlock_t *p_spin __attribute__((unused)))
clear the last_tid tracking for lock consistency checking
void nm_core_flush(struct nm_core *p_core)
Flush pending packs (if supported by the strategy).
static int nm_atomic_always_dec(int *v)
decrement int, always atomic
enum nm_thread_level_e nm_thread_level_t
static void nm_core_pack_set_hlen(struct nm_core *p_core __attribute__((unused)), struct nm_req_s *p_pack, nm_len_t hlen)
set a header length for the given pack request
uint32_t nm_status_t
status bits of pack/unpack requests
static nm_core_tag_t nm_core_tag_build(nm_session_hash_t hashcode, nm_tag_t tag)
static void nm_status_spinwait(struct nm_req_s *p_req, nm_status_t status)
int nm_core_set_strategy(nm_core_t p_core, puk_component_t strategy)
static void nm_status_init(struct nm_req_s *p_req, nm_status_t bitmask)
initialize cond status with given initial value
static void nm_spin_destroy(nm_spinlock_t *p_spin)
destroy the spin lock
PUK_VECT_TYPE(nm_drv, nm_drv_t)
static int nm_atomic_inc(int *v)
increment int, atomic only when multithread
nm_gate_t nm_core_gate_new(nm_core_t p_core, nm_drv_vect_t *p_drvs)
Init a new gate, using the given set of drivers.
static int nm_status_test_allbits(struct nm_req_s *p_req, nm_status_t bitmask)
tests for all given bits in status
void nm_core_unpack_match_event(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_core_event_s *p_event)
match an unpack request with a packet that triggered an event
static nm_tag_t nm_core_tag_get_tag(nm_core_tag_t core_tag)
static void nm_status_destroy(struct nm_req_s *p_req)
struct nm_spinlock_s nm_spinlock_t
static void nm_spin_lock(nm_spinlock_t *p_spin)
acquire the spin lock
void nm_core_unpack_partition_free(struct nm_req_s *p_unpack)
static void nm_cond_mask(nm_cond_status_t *p_cond, nm_status_t bitmask)
void nm_core_task_submit_unlocked(struct nm_core *p_core, void(*p_handler)(void))
submit task lock-free to the submission list
static void nm_mem_fence_always(void)
memory fence, always
void nm_core_unpack_submit(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_req_flag_t flags)
submit an unpack request
static void nm_spin_assert_locked(nm_spinlock_t *p_spin)
assert that current thread holds the lock
static int nm_atomic_always_compare_and_swap(int *v, int oldval, int newval)
boolean int compare and swap, always atomic
void nm_core_set_thread_level(nm_thread_level_t)
Sets the thread level before nm core init.
static void nm_mem_fence(void)
memory fence only when multithread
void nm_core_req_monitor(struct nm_core *p_core, struct nm_req_s *p_req, struct nm_monitor_s monitor)
set a per-request monitor.
uint32_t nm_req_chunk_flag_t
flags for req_chunk
void nm_core_pack_set_priority(struct nm_core *p_core, struct nm_req_s *p_pack, nm_prio_t priority)
set a priority for the given pack request
static void nm_spin_unlock(nm_spinlock_t *p_spin)
release the spin lock
static void nm_status_add(struct nm_req_s *p_req, nm_status_t bitmask)
static void nm_status_wait(struct nm_req_s *p_req, nm_status_t bitmask, nm_core_t p_core)
wait for any bit matching in req status
static void nm_atomic_add(int *v, int v2)
int add, atomic only when multithread
static int nm_atomic_alway_inc(int *v)
increment int, always atomic
void nm_core_task_submit_locked(struct nm_core *p_core, void(*p_handler)(void))
lock then submit task to pending list
static nm_status_t nm_status_test(const struct nm_req_s *p_req, nm_status_t bitmask)
query for given bits in req status; returns matched bits
static void nm_status_assert(struct nm_req_s *p_req __attribute__((unused)), nm_status_t value __attribute__((unused)))
int nm_core_init(nm_core_t *pp_core)
void nm_core_gate_connect_wait(nm_core_t p_core, struct nm_trk_s *p_trk)
wait for connection completion
void nm_core_monitor_remove(nm_core_t p_core, struct nm_core_monitor_s *m)
Unregister an event monitor.
nm_seq_t nm_core_send_seq_get(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag)
get a seq number in the out stream, to route packet outside of nmad core
void nm_core_gate_connect_async(nm_core_t p_core, nm_gate_t gate, nm_drv_t p_drv, nm_trk_id_t trk_id, const char *url)
start connection process on given gate/trk
void nm_core_pack_submit_chunks(struct nm_core *p_core, struct nm_req_s *p_pack, int n, const struct nm_chunk_s *p_chunks)
@ NM_CORE_TASK_COMPLETED_PREFETCH
prefetch completed; process RTR if received
@ NM_CORE_TASK_UNPACK_NEXT
try to match the next unpack on the given gate/tag/gtag
@ NM_CORE_TASK_RTR_SEND
send a RTR once the large pw for recv has been posted
@ NM_CORE_TASK_PACK_SUBMISSION
process a submitted pack request
@ NM_CORE_TASK_COMPLETED_PW
process a completed pw
@ NM_CORE_TASK_HANDLER
call a user handler, mainly for testing/benchmarking
static void nm_cond_init(nm_cond_status_t *p_cond, nm_status_t bitmask)
initialize a nm_cond_status_t object
static nm_status_t nm_cond_test(const nm_cond_status_t *p_cond, nm_status_t bitmask)
test whether the given bit is set in the status; unlocked, weak consistency
static void nm_cond_add(nm_cond_status_t *p_cond, nm_status_t bitmask)
add a bit to the bitmask in the status, do not unlock waiters (for bits that will not be waited for)
static void nm_cond_wait_all(void **pp_conds, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
wait on multiple statuses at the same time
static void nm_cond_signal(nm_cond_status_t *p_cond, nm_status_t bitmask)
add a bit and wake up threads waiting for it
static nm_status_t nm_cond_test_locked(const nm_cond_status_t *p_cond, nm_status_t bitmask)
test whether the given bit is set in the status; locked, guaranteed consistency, slower
static void nm_cond_wait(nm_cond_status_t *p_cond, nm_status_t bitmask, nm_core_t p_core)
wait for the given bit to be set in the status; do active polling while waiting
static void nm_cond_destroy(nm_cond_status_t *p_cond)
free resources associated with a nm_cond_status_t object
nm_session_hash_t hashcode
the session hashcode
nm_tag_t tag
the user-supplied tag
Basic primitives to display info & warnings.
#define NM_FATAL(format,...)
#define NM_WARN(format,...)
nm_gate_t gate
gate of the destination or the source node
nm_mpi_status_t status
status of request
This is the common public header for NewMad.
int8_t nm_trk_id_t
ID of a track, assigned in order.
uint64_t nm_req_seq_t
sequence number for requests
int32_t nm_prio_t
message priority
uint64_t nm_tag_t
user tags, 64 bits, contained in indirect hashtable
uint64_t nm_len_t
data length used by nmad
uint32_t nm_seq_t
Sequence number for packets on a given gate/tag.
uint8_t nm_proto_t
protocol flags- not part of the public API, but needed for inline
enum nm_trk_kind_e nm_trk_kind_t
matching info for global monitors
nm_core_tag_t tag_mask
the mask to apply before comparing tags (only bits set in mask will be checked)
nm_core_tag_t tag
the tag to listen to
nm_gate_t p_gate
the gate to listen to, or NM_ANY_GATE for any
An event, generated by the NewMad core.
struct nm_req_s * p_req
the request that matched the event- NULL in case of unexpected packets
nm_status_t status
status flags- describe the event
exposed here for inlining; do not use this value, use the accessor nm_core_get_singleton()
nm_core_t p_core_singleton
global monitor for status transitions
struct nm_monitor_s monitor
the monitor to fire upon matching event
struct nm_core_event_matching_s matching
packet matching information
nm_tag_t tag
the user-supplied tag
nm_session_hash_t hashcode
the session hashcode
asynchronous tasks for nmad core.
struct nm_core_task_s::@11::@17 handler
struct nm_core_task_s::@11::@13 completed_pw
enum nm_core_task_kind_e kind
struct nm_core_task_s::@11::@16 rtr_send
struct nm_core_task_s::@11::@12 unpack_next
union nm_core_task_s::@11 content
struct nm_pkt_wrap_s * p_pw
struct nm_core_task_s::@11::@15 pack_submission
struct nm_matching_container_s matching
struct nm_req_chunk_s * p_req_chunk
struct nm_core_task_s::@11::@14 completed_prefetch
Core NewMadeleine structure.
block of static properties for a given data descriptor
a data descriptor, used to pack/unpack data from app layout to/from contiguous buffers
const char * url
driver url, as string
const struct nm_minidriver_iface_s * driver
Driver interface, for use when no instance is needed.
Connection to another process.
status of tags on each gate
containers for matching info, used for caching
struct nm_gtag_s * p_gtag
cache of gtag
struct nm_matching_gsession_s * p_gsession
cache of matching gsession
struct nm_matching_wildcard_s * p_wildcard
cache of matching wildcard
struct nm_matching_tag_s * p_matching_tag
cache of matching tag
struct to store matching info for any-source requests of a given tag
struct to store matching info for wildcard requests, one per session
generic monitor, used for requests and for global events (with matching)
nm_status_t event_mask
mask applied to status to check whether to fire events
nm_core_event_notifier_t p_notifier
notification function called to fire events
void * ref
opaque user-supplied pointer passed to notifier
PUK_LIST_LINK(nm_req_chunk)
nm_len_t chunk_offset
offset of the chunk relative to the full data in the req
struct nm_core_task_s core_task
nm_proto_t proto_flags
pre-computed proto flags
struct nm_data_properties_s chunk_props
properties of the data chunk
nm_len_t chunk_len
length of the chunk
struct nm_req_s * p_req
link to insert the req chunk as a core task
a generic pack/unpack request
struct nm_req_s::@18::@21 unpack
nm_core_tag_t tag
tag to send to/from (works in combination with tag_mask for recv)
nm_len_t expected_len
length of posted recv (may be updated if matched packet is shorter)
PUK_LIST_LINK(nm_req)
link to enqueue req in pending requests lists
nm_gate_t p_gate
dest/src gate; NULL if recv from any source
struct nm_matching_container_s matching
link to store request in a matching map
nm_len_t done
cumulated length of data sent so far
nm_prio_t priority
request priority level
uint32_t checksum
data checksum when pack was submitted- for debug only
struct nm_req_pchunk_s * p_next
struct nm_req_s::@18::@21::@22::nm_req_pchunk_s * p_pchunks
unsorted list of arrived chunks; reads are lock-free, writes are within core_core_lock sections
struct nm_gtag_s * p_gtag
cache for tag status on gate; NULL if tag or gate is unspecified yet
nm_len_t offset
offset of data partially received
nm_cond_status_t status
status, including status bits and synchronization
struct nm_req_s::@18::@21::@22 partition
partitioned unpack, used only if NM_REQ_FLAG_UNPACK_PARTITIONED is set
nm_req_seq_t req_seq
request sequence number used to interleave wildcard/non-wildcard requests
struct nm_req_chunk_s req_chunk
preallocated chunk for the common case (single-chunk)
nm_req_flag_t flags
flags given by user
struct nm_data_s data
data descriptor to send/recv
struct nm_req_s::@18::@20 pack
struct nm_pkt_wrap_s * p_prefetch_pw
packet wrapper to prefetch recv
nm_core_tag_t tag_mask
mask applied to tag for matching (only bits in mask need to match)
nm_len_t cumulated_len
amount of data unpacked so far
nm_len_t len
cumulated data length
int err
error status of the request
struct nm_monitor_s monitor
monitor attached to this request (only 1)
nm_seq_t seq
packet sequence number on the given tag
nm_len_t hlen
length of header to send eagerly