NewMadeleine

Documentation

« back to PM2 home.
nm_core_interface.h
Go to the documentation of this file.
1/*
2 * NewMadeleine
3 * Copyright (C) 2006-2026 (see AUTHORS file)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16
17#ifndef NM_CORE_INTERFACE_H
18#define NM_CORE_INTERFACE_H
19
24#include <nm_public.h>
25#include <nm_log.h>
26#include <Padico/Puk.h>
27#include <sys/uio.h>
28
29#ifdef PIOMAN
30#include <pioman.h>
31#else
32#include <pthread.h>
33#endif
34
58/* ** Core init ******************************************** */
59
60typedef struct nm_core*nm_core_t;
61
62puk_component_t nm_core_component_load(const char*entity, const char*name);
63
64int nm_core_init(nm_core_t *pp_core);
65
66int nm_core_set_strategy(nm_core_t p_core, puk_component_t strategy);
67
69
72
78
80static inline nm_core_t nm_core_get_singleton(void)
81{
83}
84
87
88
89/* ** Drivers ********************************************** */
90
92typedef struct nm_drv_s*nm_drv_t;
93
95
97 nm_drv_t *pp_drv, const char**p_url);
98
99
100/* ** Gates ************************************************ */
101
108
109
110/* ** Threads ********************************************** */
111
120
124
129
130/* ** Progression ****************************************** */
131
133
136/* ** Status *********************************************** */
137
146#ifdef PIOMAN
148typedef piom_cond_value_t nm_status_t;
150typedef piom_cond_t nm_cond_status_t;
151#else /* PIOMAN */
153typedef uint32_t nm_status_t;
156#endif /* PIOMAN */
157
158/* ** status and flags, used in pack/unpack requests and events */
159
161#define NM_STATUS_NONE ((nm_status_t)0x00000000)
163#define NM_STATUS_PACK_INIT ((nm_status_t)0x00000001)
165#define NM_STATUS_UNPACK_INIT ((nm_status_t)0x00000002)
167#define NM_STATUS_PACK_COMPLETED ((nm_status_t)0x00000004)
169#define NM_STATUS_UNPACK_COMPLETED ((nm_status_t)0x00000008)
171#define NM_STATUS_UNEXPECTED ((nm_status_t)0x00000010)
173#define NM_STATUS_UNPACK_CANCELLED ((nm_status_t)0x00000020)
175#define NM_STATUS_PACK_POSTED ((nm_status_t)0x00000040)
177#define NM_STATUS_UNPACK_POSTED ((nm_status_t)0x00000080)
179#define NM_STATUS_ACK_RECEIVED ((nm_status_t)0x00000100)
181#define NM_STATUS_UNPACK_DATA0 ((nm_status_t)0x00000200)
183#define NM_STATUS_UNPACK_DATA_SIZE ((nm_status_t)0x00000400)
185#define NM_STATUS_FINALIZED ((nm_status_t)0x00000800)
187#define NM_STATUS_ERROR ((nm_status_t)0x00001000)
189#define NM_STATUS_PACK_MSG_SIZE ((nm_status_t)0x00002000)
191#define NM_STATUS_UNPACK_PREFETCHED ((nm_status_t)0x00004000)
192
194#define NM_STATUS_MASK_FULL ((nm_status_t)-1)
195
196
198typedef uint32_t nm_req_flag_t;
199
201#define NM_REQ_FLAG_NONE ((nm_req_flag_t)0x00000000)
203#define NM_REQ_FLAG_PACK_SYNCHRONOUS ((nm_req_flag_t)0x00001000)
205#define NM_REQ_FLAG_PACK ((nm_req_flag_t)0x00002000)
207#define NM_REQ_FLAG_UNPACK ((nm_req_flag_t)0x00004000)
209#define NM_REQ_FLAG_UNPACK_DATA_INFO ((nm_req_flag_t)0x00008000)
211#define NM_REQ_FLAG_UNPACK_MATCHING_INFO ((nm_req_flag_t)0x00010000)
213#define NM_REQ_FLAG_UNPACK_PREFETCHING ((nm_req_flag_t)0x00020000)
215#define NM_REQ_FLAG_MATCHING_WILDCARD ((nm_req_flag_t)0x00100000)
217#define NM_REQ_FLAG_MATCHING_GATE ((nm_req_flag_t)0x00200000)
219#define NM_REQ_FLAG_MATCHING_TAG ((nm_req_flag_t)0x00400000)
221#define NM_REQ_FLAG_MATCHING_FULL ((nm_req_flag_t)0x00800000)
223#define NM_REQ_FLAG_FINALIZE_LATER ((nm_req_flag_t)0x01000000)
225#define NM_REQ_FLAG_UNPACK_PARTITIONED ((nm_req_flag_t)0x02000000)
227#define NM_REQ_FLAG_PACK_PARTITIONED ((nm_req_flag_t)0x04000000)
228
230typedef uint32_t nm_req_chunk_flag_t;
231
233#define NM_REQ_CHUNK_FLAG_NONE ((nm_req_chunk_flag_t)0x00000000)
235#define NM_REQ_CHUNK_FLAG_SHORT ((nm_req_chunk_flag_t)0x00020000)
237#define NM_REQ_CHUNK_FLAG_USE_COPY ((nm_req_chunk_flag_t)0x00080000)
239#define NM_REQ_CHUNK_FLAG_DATA_ITERATOR ((nm_req_chunk_flag_t)0x00100000)
240
243/* ** tags ************************************************* */
244
254typedef uint32_t nm_session_hash_t;
255
257#define NM_CORE_TAG_HASH_FULL ((nm_session_hash_t)0xFFFFFFFF)
258
265
267
268#define NM_CORE_TAG_MASK_FULL ((nm_core_tag_t){ .tag = NM_TAG_MASK_FULL, .hashcode = NM_CORE_TAG_HASH_FULL })
269#define NM_CORE_TAG_NONE ((nm_core_tag_t){ .tag = 0, .hashcode = 0x0 })
270
272{
273 nm_core_tag_t core_tag;
274 core_tag.tag = tag;
275 core_tag.hashcode = hashcode;
276 return core_tag;
277}
279{
280 return core_tag.tag;
281}
283{
284 return core_tag.hashcode;
285}
286
290/* ** Event notification *********************************** */
291
310
312typedef void (*nm_core_event_notifier_t)(const struct nm_core_event_s*const event, void*ref);
313
321
329
341void nm_core_req_monitor(struct nm_core*p_core, struct nm_req_s*p_req, struct nm_monitor_s monitor);
342
343
345#define NM_EVENT_MATCHING_ANY ((struct nm_core_event_matching_s){ .p_gate = NM_ANY_GATE, .tag = NM_CORE_TAG_NONE, .tag_mask = NM_CORE_TAG_NONE })
346
347#define NM_MONITOR_NULL ((struct nm_monitor_s){ .p_notifier = NULL, .event_mask = 0, .ref = NULL })
348
349#define NM_CORE_MONITOR_NULL ((struct nm_core_monitor_s){ .monitor = NM_MONITOR_NULL, .matching = NM_EVENT_MATCHING_ANY })
350
353/* ** Core tasks ******************************************* */
354
378
387#define NM_MATCHING_CONTAINER_NULL ((struct nm_matching_container_s) { NULL })
388
401{
403 union
404 {
405 struct
406 {
410 struct
411 {
414 struct
415 {
416 struct nm_pkt_wrap_s*p_pw;
418 struct
419 {
420 struct nm_pkt_wrap_s*p_pw;
422 struct
423 {
426 struct
427 {
428 struct nm_pkt_wrap_s*p_pw;
430 struct
431 {
432 void (*p_handler)(void);
435};
436
440void nm_core_task_submit_locked(struct nm_core*p_core, void (*p_handler)(void));
441
445void nm_core_task_submit_unlocked(struct nm_core*p_core, void (*p_handler)(void));
446
449/* ** Packs/unpacks **************************************** */
450
464
466
478
480
527
529void nm_core_pack_init(struct nm_core*p_core, struct nm_req_s*p_pack);
530
532void nm_core_pack_data(nm_core_t p_core, struct nm_req_s*p_pack, const struct nm_data_s*p_data);
533
536
538void nm_core_pack_submit(struct nm_core*p_core, struct nm_req_s*p_pack);
539
541void nm_core_pack_set_priority(struct nm_core*p_core, struct nm_req_s*p_pack, nm_prio_t priority);
542
/** set a header length for the given pack request.
 *  Stores hlen in the request; p_core is unused here (kept for API symmetry
 *  with the other nm_core_pack_* entry points). */
static inline void nm_core_pack_set_hlen(struct nm_core*p_core __attribute__((unused)), struct nm_req_s*p_pack, nm_len_t hlen)
{
  p_pack->pack.hlen = hlen;
}
548
549void nm_core_pack_submit_chunks(struct nm_core*p_core, struct nm_req_s*p_pack, int n, const struct nm_chunk_s*p_chunks);
550
552void nm_core_unpack_init(struct nm_core*p_core, struct nm_req_s*p_unpack);
553
555void nm_core_unpack_offset(struct nm_core*p_core, struct nm_req_s*p_unpack, nm_len_t offset);
556
558void nm_core_unpack_data(struct nm_core*p_core, struct nm_req_s*p_unpack, const struct nm_data_s*p_data);
559
562
564void nm_core_unpack_match_event(struct nm_core*p_core, struct nm_req_s*p_unpack, const struct nm_core_event_s*p_event);
565
567void nm_core_unpack_submit(struct nm_core*p_core, struct nm_req_s*p_unpack, nm_req_flag_t flags);
568
570int nm_core_unpack_peek(struct nm_core*p_core, struct nm_req_s*p_unpack, const struct nm_data_s*p_data,
571 nm_len_t peek_offset, nm_len_t peek_len);
572
575int nm_core_unpack_iprobe(struct nm_core*p_core, struct nm_req_s*p_unpack);
576
580int nm_core_unpack_cancel(struct nm_core*p_core, struct nm_req_s*p_unpack);
581
583int nm_core_iprobe(struct nm_core*p_core,
585 nm_gate_t *pp_out_gate, nm_core_tag_t*p_out_tag, nm_len_t*p_out_size);
586
588void nm_core_flush(struct nm_core*p_core);
589
590
591/* ** Packet injection from outside of nmad core */
592
595
602typedef void(*nm_injector_pull_data_t)(struct nm_req_s*p_req, const struct nm_data_s*p_data, nm_len_t chunk_offset, nm_len_t chunk_len, void*p_ref);
603
607 nm_len_t chunk_offset, nm_len_t chunk_len, int is_last_chunk,
608 nm_injector_pull_data_t p_pull_data, void*p_ref);
609
612
614void nm_core_inject_finalize(struct nm_core*p_core, struct nm_req_s*p_req);
615
618
619/* ** partitioned unpack */
620
621void nm_core_unpack_partition_set(struct nm_req_s*p_unpack, int n_partitions);
622
624
625int nm_core_unpack_partition_test(struct nm_req_s*p_unpack, int partition);
626
630/* ** synchronization primitives *************************** */
631
645static inline void nm_cond_init(nm_cond_status_t*p_cond, nm_status_t bitmask);
646
648static inline void nm_cond_destroy(nm_cond_status_t*p_cond);
649
651static inline nm_status_t nm_cond_test(const nm_cond_status_t*p_cond, nm_status_t bitmask);
652
654static inline nm_status_t nm_cond_test_locked(const nm_cond_status_t*p_cond, nm_status_t bitmask);
655
658static inline void nm_cond_add(nm_cond_status_t*p_cond, nm_status_t bitmask);
659
661static inline void nm_cond_wait(nm_cond_status_t*p_cond, nm_status_t bitmask, nm_core_t p_core);
662
664static inline void nm_cond_signal(nm_cond_status_t*p_cond, nm_status_t bitmask);
665
667static inline void nm_cond_wait_all(void**pp_conds, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core);
668
669#if defined(PIOMAN)
670
/** initialize a nm_cond_status_t object (pioman build: delegates to piom conds) */
static inline void nm_cond_init(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  piom_cond_init(p_cond, bitmask);
}
/** free resources associated with a nm_cond_status_t object */
static inline void nm_cond_destroy(nm_cond_status_t*p_cond)
{
  piom_cond_destroy(p_cond);
}
/** test whether the given bit is set in the status; unlocked, weak consistency */
static inline nm_status_t nm_cond_test(const nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  return piom_cond_test(p_cond, bitmask);
}
/** test whether the given bit is set in the status; locked, guaranteed consistency, slower.
 *  Cast drops const because the piom locked-test API takes a non-const pointer. */
static inline nm_status_t nm_cond_test_locked(const nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  return piom_cond_test_locked((nm_cond_status_t*)p_cond, bitmask);
}
/** add a bit to the status bitmask; does not wake up waiters */
static inline void nm_cond_add(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  piom_cond_add(p_cond, bitmask);
}
/** clear the given bits from the status (presumably mirrors the non-pioman
 *  '*p_cond &= ~bitmask' semantics — confirm against piom_cond_mask) */
static inline void nm_cond_mask(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  piom_cond_mask(p_cond, bitmask);
}
/** wait for the given bit to be set in the status; p_core unused since piom
 *  drives progression itself */
static inline void nm_cond_wait(nm_cond_status_t*p_cond, nm_status_t bitmask, nm_core_t p_core __attribute__((unused)))
{
  piom_cond_wait(p_cond, bitmask);
}
/** add a bit and wake up threads waiting for it */
static inline void nm_cond_signal(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  piom_cond_signal(p_cond, bitmask);
}
/** wait on multiple statuses at the same time; offset locates the cond inside
 *  each pointed object */
static inline void nm_cond_wait_all(void**pp_conds, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core __attribute__((unused)))
{
  piom_cond_wait_all(pp_conds, n, offset, bitmask);
}
716#else /* PIOMAN */
/** initialize a nm_cond_status_t object; without pioman the status is a plain
 *  uint32_t bitmask */
static inline void nm_cond_init(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  *p_cond = bitmask;
}
/** free resources associated with a nm_cond_status_t; nothing to do without pioman */
static inline void nm_cond_destroy(nm_cond_status_t*p_cond __attribute__((unused)))
{
}
/** test whether the given bit is set in the status; unlocked, weak consistency */
static inline nm_status_t nm_cond_test(const nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  return ((*p_cond) & bitmask);
}
/* NOTE(review): the declaration line of this body was lost in extraction; by the
 * surrounding numbering and symmetry with the pioman build this is
 * nm_cond_test_locked() — without threads there is no lock, so it degrades to
 * the plain test. Confirm against the original header. */
{
  return nm_cond_test(p_cond, bitmask);
}
/** add a bit to the status bitmask; does not wake up waiters */
static inline void nm_cond_add(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  *p_cond |= bitmask;
}
/** clear the given bits from the status */
static inline void nm_cond_mask(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  *p_cond &= ~bitmask;
}
/** add a bit; without pioman there are no sleeping waiters, so this is the
 *  same operation as nm_cond_add */
static inline void nm_cond_signal(nm_cond_status_t*p_cond, nm_status_t bitmask)
{
  *p_cond |= bitmask;
}
752static inline void nm_cond_wait(nm_cond_status_t*p_cond, nm_status_t bitmask, nm_core_t p_core)
753{
754 while(!nm_cond_test(p_cond, bitmask))
755 {
756 nm_schedule(p_core);
757 }
758}
760static inline void nm_cond_wait_all(void**pp_conds, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
761{
762 int i;
763 for(i = 0; i < n; i++)
764 {
765 if(pp_conds[i] != NULL)
766 {
767 nm_cond_status_t*p_cond = (nm_cond_status_t*)((uintptr_t)pp_conds[i] + offset);
768 nm_cond_wait(p_cond, bitmask, p_core);
769 }
770 }
771}
772#endif /* PIOMAN */
773
776/* ** convenient frontends to deal with status in requests */
777
/** initialize the status field of a request with the given initial bitmask */
static inline void nm_status_init(struct nm_req_s*p_req, nm_status_t bitmask)
{
  nm_cond_init(&p_req->status, bitmask);
}
/** free resources associated with the status field of a request */
static inline void nm_status_destroy(struct nm_req_s*p_req)
{
  nm_cond_destroy(&p_req->status);
}
/** test bits in the request status; uses the locked (strongly consistent) test
 *  when FINALIZED is involved, the cheaper unlocked test otherwise */
static inline nm_status_t nm_status_test(const struct nm_req_s*p_req, nm_status_t bitmask)
{
  if(bitmask & NM_STATUS_FINALIZED) /* status FINALIZED needs strong consistency to avoid use after free */
    return nm_cond_test_locked(&p_req->status, bitmask);
  else
    return nm_cond_test(&p_req->status, bitmask);
}
/** add bits to the request status without waking up waiters */
static inline void nm_status_add(struct nm_req_s*p_req, nm_status_t bitmask)
{
  nm_cond_add(&p_req->status, bitmask);
}
/** clear bits from the request status */
static inline void nm_status_unset(struct nm_req_s*p_req, nm_status_t bitmask)
{
  nm_cond_mask(&p_req->status, bitmask);
}
/** wait for bits of the given mask to be set in the request status;
 *  polls progression while waiting */
static inline void nm_status_wait(struct nm_req_s*p_req, nm_status_t bitmask, nm_core_t p_core)
{
  nm_cond_wait(&p_req->status, bitmask, p_core);
  assert(nm_status_test(p_req, bitmask) != 0);
}
/** add bits to the request status and wake up threads waiting for them */
static inline void nm_status_signal(struct nm_req_s*p_req, nm_status_t bitmask)
{
  nm_cond_signal(&p_req->status, bitmask);
}
/** wait on the statuses of multiple requests at the same time.
 *  @param pp_reqs array of pointers to objects embedding a nm_req_s
 *  @param offset byte offset of the nm_req_s inside each pointed object
 *  NOTE(review): the offset-of-'status' computation dereferences a NULL
 *  pointer, which is formally undefined behavior; offsetof(struct nm_req_s,
 *  status) would be the clean equivalent — confirm and fix upstream. */
static inline void nm_status_wait_all(void**pp_reqs, int n, uintptr_t offset,
                                      nm_status_t bitmask, nm_core_t p_core)
{
  const struct nm_req_s*p_req = NULL;
  const uintptr_t status_offset = (uintptr_t)&p_req->status - (uintptr_t)p_req; /* offset of 'status' in nm_req_s */
  nm_cond_wait_all(pp_reqs, n, offset + status_offset, bitmask, p_core);
}
835static inline void nm_status_assert(struct nm_req_s*p_req __attribute__((unused)), nm_status_t value __attribute__((unused)))
836{
838}
839
/** busy-wait until one of the given status bits is set; no progression is
 *  driven while spinning (unlike nm_status_wait) */
static inline void nm_status_spinwait(struct nm_req_s*p_req, nm_status_t status)
{
  while(!nm_status_test(p_req, status))
    { /* busy wait */ }
}
/** test whether ALL bits of the mask are set in the request status
 *  (nm_status_test alone is non-zero as soon as any bit matches) */
static inline int nm_status_test_allbits(struct nm_req_s*p_req, nm_status_t bitmask)
{
  return (nm_status_test(p_req, bitmask) == bitmask);
}
850
853/* ** frontends for atomic ops ***************************** */
854
/** full memory fence, issued unconditionally (even in single-threaded builds) */
static inline void nm_mem_fence_always(void)
{
  __sync_synchronize();
}
869
871static inline void nm_mem_fence(void)
872{
873#if defined(PIOMAN_MULTITHREAD)
874 __sync_synchronize();
875#else
876 nm_core_t p_core = nm_core_get_singleton();
878 {
879 __sync_synchronize();
880 }
881#endif /* PIOMAN_MULTITHREAD */
882}
883
885static inline int nm_atomic_inc(int*v)
886{
887#if defined(PIOMAN_MULTITHREAD)
888 return __sync_fetch_and_add(v, 1);
889#else
890 nm_core_t p_core = nm_core_get_singleton();
892 {
893 return __sync_fetch_and_add(v, 1);
894 }
895 else
896 {
897 return (*v)++;
898 }
899#endif /* PIOMAN_MULTITHREAD */
900}
901
/** increment an int, always atomically (regardless of thread level).
 *  @return the value held before the increment.
 *  NOTE(review): "alway" (sic) is the established public name; kept as-is for
 *  API compatibility. */
static inline int nm_atomic_alway_inc(int*v)
{
  const int prev = __sync_fetch_and_add(v, 1);
  return prev;
}
907
909static inline int nm_atomic_dec(int*v)
910{
911#if defined(PIOMAN_MULTITHREAD)
912 return __sync_sub_and_fetch(v, 1);
913#else
914 nm_core_t p_core = nm_core_get_singleton();
916 {
917 return __sync_sub_and_fetch(v, 1);
918 }
919 else
920 {
921 return --(*v);
922 }
923#endif /* PIOMAN_MULTITHREAD */
924}
925
/** decrement an int, always atomically (regardless of thread level).
 *  @return the new (decremented) value. */
static inline int nm_atomic_always_dec(int*v)
{
  const int newval = __sync_sub_and_fetch(v, 1);
  return newval;
}
931
933static inline void nm_atomic_add(int*v, int v2)
934{
935#if defined(PIOMAN_MULTITHREAD)
936 __sync_fetch_and_add(v, v2);
937#else
938 nm_core_t p_core = nm_core_get_singleton();
940 {
941 __sync_fetch_and_add(v, v2);
942 }
943 else
944 {
945 (*v) += v2;
946 }
947#endif /* PIOMAN_MULTITHREAD */
948}
949
/** add v2 to *v, always atomically (regardless of thread level);
 *  the previous value returned by the builtin is deliberately discarded */
static inline void nm_atomic_always_add(int*v, int v2)
{
  (void)__sync_fetch_and_add(v, v2);
}
955
957static inline int nm_atomic_compare_and_swap(int*v, int oldval, int newval)
958{
959#if defined(PIOMAN_MULTITHREAD)
960 return __sync_bool_compare_and_swap(v, oldval, newval);
961#else
962 nm_core_t p_core = nm_core_get_singleton();
964 {
965 return __sync_bool_compare_and_swap(v, oldval, newval);
966 }
967 else
968 {
969 if(*v == oldval)
970 {
971 *v = newval;
972 return 1;
973 }
974 else
975 {
976 return 0;
977 }
978 }
979#endif /* PIOMAN_MULTITHREAD */
980}
981
/** boolean compare-and-swap on an int, always atomic.
 *  Writes newval into *v only if *v equals oldval.
 *  @return non-zero if the swap was performed, 0 otherwise. */
static inline int nm_atomic_always_compare_and_swap(int*v, int oldval, int newval)
{
  const int swapped = __sync_bool_compare_and_swap(v, oldval, newval);
  return swapped;
}
987
990/* ** frontend for generic locking ************************* */
991
1008#ifdef PIOMAN
1009typedef piom_spinlock_t nm_spinlock_t;
1010#else /* PIOMAN */
1012{
1013#ifdef NMAD_DEBUG
1014 int lock;
1015 pthread_t last_tid;
1016#endif /* NMAD_DEBUG */
1017};
1019#endif /* PIOMAN */
1020
1022static inline void nm_spin_init(nm_spinlock_t*p_spin);
1023
1025static inline void nm_spin_destroy(nm_spinlock_t*p_spin);
1026
1028static inline void nm_spin_lock(nm_spinlock_t*p_spin);
1029
1031static inline void nm_spin_unlock(nm_spinlock_t*p_spin);
1032
1036static inline int nm_spin_trylock(nm_spinlock_t*p_spin);
1037
1039static inline void nm_spin_assert_locked(nm_spinlock_t*p_spin);
1040
1042static inline void nm_spin_assert_notlocked(nm_spinlock_t*p_spin);
1043
1044
1046static inline void nm_spin_check_nothread(nm_spinlock_t*p_spin __attribute__((unused)))
1047{
1048#if defined(NMAD_DEBUG) && !defined(PIOMAN)
1049 nm_core_t p_core = nm_core_get_singleton();
1051 if(p_spin->last_tid == (pthread_t)0)
1052 {
1053 p_spin->last_tid = pthread_self();
1054 __sync_synchronize();
1055 }
1056 else
1057 {
1058 if(p_spin->last_tid != pthread_self())
1059 {
1060 NM_FATAL("detected calls from multiple threads in non-threaded mode. Please use pioman-enabled build for multi-threaded use or give thread level using nm_core_set_thread_level(NM_THREAD_SERIALIZED) for serialized thread level.");
1061 }
1062 }
1063#endif /* NMAD_DEBUG && !PIOMAN */
1064}
1065
1067static inline void nm_spin_clear_nothread(nm_spinlock_t*p_spin __attribute__((unused)))
1068{
1069#if defined(NMAD_DEBUG) && !defined(PIOMAN)
1070 nm_core_t p_core = nm_core_get_singleton();
1072 if(p_spin->last_tid == 0)
1073 {
1074 NM_FATAL("unlocking while no thread is holding the lock.");
1075 }
1076 else if(p_spin->last_tid != pthread_self())
1077 {
1078 NM_WARN("unlocking from another thread than where lock was acquired.\n");
1079 }
1081 {
1082 p_spin->last_tid = (pthread_t)0;
1083 }
1084#endif /* NMAD_DEBUG && !PIOMAN */
1085}
1086
/** initialize a spinlock; with pioman, delegates to piom spinlocks, otherwise
 *  (single-threaded build) the lock exists only as debug bookkeeping */
static inline void nm_spin_init(nm_spinlock_t*p_spin __attribute__((unused)))
{
#ifdef PIOMAN
  piom_spin_init(p_spin);
#else /* PIOMAN */
#ifdef NMAD_DEBUG
  p_spin->lock = 0;       /* 0 = free, 1 = held */
  p_spin->last_tid = 0;   /* thread that last took the lock; 0 = none */
#endif /* NMAD_DEBUG */
#endif /* PIOMAN */
}
1098
/** destroy a spinlock; in non-pioman debug builds, asserts it is not held */
static inline void nm_spin_destroy(nm_spinlock_t*p_spin __attribute__((unused)))
{
#ifdef PIOMAN
  piom_spin_destroy(p_spin);
#else /* PIOMAN */
#ifdef NMAD_DEBUG
  assert(p_spin->lock == 0);
#endif /* NMAD_DEBUG */
#endif /* PIOMAN */
}
1109
1110static inline void nm_spin_lock(nm_spinlock_t*p_spin __attribute__((unused)))
1111{
1112#ifdef PIOMAN
1113 piom_spin_lock(p_spin);
1114#else /* PIOMAN */
1115#ifdef NMAD_DEBUG
1116 __sync_synchronize();
1117 if(p_spin->lock != 0)
1118 {
1119 NM_FATAL("spinlock is not free in nm_spin_lock(); detected concurrent access from thread = %p. Suspecting multi-threaded use by the application while library is initialized in non-threaded mode.\n",
1120 (void*)p_spin->last_tid);
1121 }
1122 p_spin->lock = 1;
1123#endif /* NMAD_DEBUG */
1124 nm_spin_check_nothread(p_spin);
1125 nm_core_t p_core = nm_core_get_singleton();
1127 {
1128 __sync_synchronize();
1129 }
1130#endif /* PIOMAN */
1131}
1132
1133static inline void nm_spin_unlock(nm_spinlock_t*p_spin __attribute__((unused)))
1134{
1135#ifdef PIOMAN
1136 piom_spin_unlock(p_spin);
1137#else /* PIOMAN */
1138 nm_spin_clear_nothread(p_spin);
1139#ifdef NMAD_DEBUG
1140 __sync_synchronize();
1141 assert(p_spin->lock == 1);
1142 p_spin->lock = 0;
1143#endif /* NMAD_DEBUG */
1144 nm_core_t p_core = nm_core_get_singleton();
1146 {
1147 __sync_synchronize();
1148 }
1149#endif /* PIOMAN */
1150}
1151
1152static inline int nm_spin_trylock(nm_spinlock_t*p_spin __attribute__((unused)))
1153{
1154#ifdef PIOMAN
1155 return piom_spin_trylock(p_spin);
1156#else /* PIOMAN */
1157 int rc = 1;
1158#ifdef NMAD_DEBUG
1159 __sync_synchronize();
1160 if(p_spin->lock)
1161 {
1162 assert(p_spin->lock == 1);
1163 rc = 0;
1164 }
1165 else
1166 {
1167 rc = 1;
1168 p_spin->lock = 1;
1169 nm_spin_check_nothread(p_spin);
1170 }
1171#endif /* NMAD_DEBUG */
1172 nm_core_t p_core = nm_core_get_singleton();
1174 {
1175 __sync_synchronize();
1176 }
1177 return rc;
1178#endif /* PIOMAN */
1179}
1180
/** assert that the spinlock is currently held by the calling thread
 *  (no-op outside debug/pioman builds) */
static inline void nm_spin_assert_locked(nm_spinlock_t*p_spin __attribute__((unused)))
{
#ifdef PIOMAN
  piom_spin_assert_locked(p_spin);
#else /* PIOMAN */
#ifdef NMAD_DEBUG
  assert(p_spin->lock == 1);
  assert(p_spin->last_tid == pthread_self());
#endif /* NMAD_DEBUG */
#endif /* PIOMAN */
}
1192
/** assert that the spinlock is currently free (no-op outside debug/pioman builds) */
static inline void nm_spin_assert_notlocked(nm_spinlock_t*p_spin __attribute__((unused)))
{
#ifdef PIOMAN
  piom_spin_assert_notlocked(p_spin);
#else /* PIOMAN */
#ifdef NMAD_DEBUG
  assert(p_spin->lock == 0);
#endif /* NMAD_DEBUG */
#endif /* PIOMAN */
}
1203
1206/* ** refcount ********************************************* */
1207
1209PUK_LIST_TYPE(nm_refcount_holder,
1210 const void*p_holder;
1211 const char*func;
1212 const char*file;
1213 int line;
1214 );
1215
1221{
1223#ifdef NMAD_DEBUG
1224 char*object_id;
1225 nm_spinlock_t lock;
1226 struct nm_refcount_holder_list_s holders;
1227#endif /* NMAD_DEBUG */
1228};
1229
/** log every holder still registered on the refcount (each entry represents an
 *  unreleased reference, with the call site that took it); debug builds only */
static inline void nm_refcount_dump(struct nm_refcount_s*p_refcount __attribute__((unused)))
{
#ifdef NMAD_DEBUG
  nm_refcount_holder_itor_t i;
  puk_list_foreach(nm_refcount_holder, i, &p_refcount->holders)
    {
      NM_WARN("pending ref- object = %s; p_holder = %p; %s:%d %s()\n",
              p_refcount->object_id, i->p_holder, i->file, i->line, i->func);
    }
#endif /* NMAD_DEBUG */
}
1241
/** release resources of a refcount object; in debug builds, dumps any holders
 *  still pending (leak diagnostic) before freeing the bookkeeping.
 *  NOTE(review): does not assert refcount == 0 — presumably intentional so
 *  leaks are reported rather than fatal; confirm. */
static inline void nm_refcount_destroy(struct nm_refcount_s*p_refcount __attribute__((unused)))
{
#ifdef NMAD_DEBUG
  nm_refcount_dump(p_refcount);
  nm_spin_destroy(&p_refcount->lock);
  padico_free(p_refcount->object_id);
  nm_refcount_holder_list_destroy(&p_refcount->holders);
#endif /* NMAD_DEBUG */
}
1251
/** get the current number of references; plain (non-atomic) read, so the
 *  value is only a snapshot under concurrent use */
static inline int nm_refcount_get(struct nm_refcount_s*p_refcount)
{
  return p_refcount->refcount;
}
1256
/** take a reference on the object; captures the caller's function/file/line so
 *  debug builds can report who still holds references */
#define nm_refcount_inc(REFCOUNT, HOLDER) \
  nm_refcount_inc_internal(REFCOUNT, HOLDER, __FUNCTION__, __FILE__, __LINE__)
1259
1260#ifdef NMAD_DEBUG
/** record p_holder (with its call-site info) in the holders list; debug builds
 *  only. Callers in this file take p_refcount->lock before calling — presumably
 *  a required precondition; confirm. */
static inline void nm_refcount_add_holder(struct nm_refcount_s*p_refcount, const void*p_holder,
                                          const char*func, const char*file, const int line)
{
  struct nm_refcount_holder_s*h = nm_refcount_holder_new();
  h->p_holder = p_holder;
  h->func = func;
  h->file = file;
  h->line = line;
  nm_refcount_holder_list_push_back(&p_refcount->holders, h);
}
1271
/** count how many references p_holder currently owns on this object
 *  (linear scan of the holders list); debug builds only */
static inline int nm_refcount_count_holder(struct nm_refcount_s*p_refcount, const void*p_holder)
{
  int count = 0;
  nm_refcount_holder_itor_t i;
  puk_list_foreach(nm_refcount_holder, i, &p_refcount->holders)
    {
      if(i->p_holder == p_holder)
        count++;
    }
  return count;
}
1283#endif /* NMAD_DEBUG */
1284
/** take a reference on the object (use the nm_refcount_inc() macro, which
 *  fills in func/file/line automatically).
 *  In debug builds, additionally records p_holder and the call site in the
 *  holders list, under p_refcount->lock; a NULL p_holder skips the tracking. */
static inline void nm_refcount_inc_internal(struct nm_refcount_s*p_refcount, const void*p_holder __attribute__((unused)),
                                            const char*func __attribute__((unused)),
                                            const char*file __attribute__((unused)),
                                            const int line __attribute__((unused)))
{
  assert(p_refcount->refcount >= 0);
  nm_atomic_inc(&p_refcount->refcount);
#ifdef NMAD_DEBUG
  if(p_holder != NULL)
    {
      nm_spin_lock(&p_refcount->lock);
#if 0
      /* disabled diagnostic: warn when the same holder takes a second reference */
      if(nm_refcount_count_holder(p_refcount, p_holder) > 0)
        {
          nm_refcount_holder_itor_t i;
          PUK_LIST_FIND(nm_refcount_holder, i, &p_refcount->holders, (i->p_holder == p_holder));
          NM_WARN("p_holder = %p already holding a ref to p_refcount = %p (%s)\n"
                  " new holder: %s:%d %s()\n"
                  " previous holder: %s:%d %s()\n",
                  p_holder, p_refcount, p_refcount->object_id,
                  file, line, func, i->file, i->line, i->func);
        }
#endif /* 0 */
      nm_refcount_add_holder(p_refcount, p_holder, func, file, line);
      nm_spin_unlock(&p_refcount->lock);
    }
#endif /* NMAD_DEBUG */
}
1313
/** release a reference on the object.
 *  @param p_holder must match the holder passed at nm_refcount_inc() time;
 *         in debug builds a non-matching holder is fatal
 *  @return the number of references remaining after the decrement (the caller
 *          typically destroys the object when this reaches 0) */
static inline int nm_refcount_dec(struct nm_refcount_s*p_refcount, const void*p_holder __attribute__((unused)))
{
  const int nb_ref = nm_atomic_dec(&p_refcount->refcount);
  assert(nb_ref >= 0);
#ifdef NMAD_DEBUG
  if(p_holder != NULL)
    {
      nm_spin_lock(&p_refcount->lock);
      const int count = nm_refcount_count_holder(p_refcount, p_holder);
      nm_refcount_holder_itor_t i;
      PUK_LIST_FIND(nm_refcount_holder, i, &p_refcount->holders, (i->p_holder == p_holder));
      if(i == NULL)
        {
          NM_FATAL("p_holder = %p not holding ref to p_refcount = %p (%s); cannot release\n",
                   p_holder, p_refcount, p_refcount->object_id);
        }
      else
        {
          nm_refcount_holder_list_remove(&p_refcount->holders, i);
          nm_refcount_holder_delete(i);
        }
      /* exactly one tracking entry must have been removed */
      assert(nm_refcount_count_holder(p_refcount, p_holder) == count - 1);
      nm_spin_unlock(&p_refcount->lock);
    }
#endif /* NMAD_DEBUG */
  return nb_ref;
}
1342
/** initialize a refcount object with one initial reference.
 *  @param p_object_id heap-allocated identification string; ownership is
 *         transferred (kept for diagnostics in debug builds, freed immediately
 *         otherwise)
 *  @param p_init_holder holder recorded for the initial reference in debug
 *         builds; may be NULL to skip tracking */
static inline void nm_refcount_init(struct nm_refcount_s*p_refcount, char*p_object_id, const void*p_init_holder)
{
  p_refcount->refcount = 1;
#ifdef NMAD_DEBUG
  p_refcount->object_id = p_object_id;
  nm_spin_init(&p_refcount->lock);
  nm_refcount_holder_list_init(&p_refcount->holders);
  if(p_init_holder != NULL)
    {
      nm_refcount_add_holder(p_refcount, p_init_holder, __FUNCTION__, __FILE__, __LINE__);
    }
#else /* NMAD_DEBUG */
  if(p_object_id != NULL)
    padico_free(p_object_id); /* when not in debug, we won't use the object_id at all */
#endif /* NMAD_DEBUG */
}
1363
1364
1365#endif /* NM_CORE_INTERFACE_H */
static int nm_atomic_compare_and_swap(int *v, int oldval, int newval)
boolean int compare and swap, atomic only when multithread
static void nm_atomic_always_add(int *v, int v2)
int add, always atomic
static int nm_atomic_dec(int *v)
decrement int, atomic only when multithread
static int nm_atomic_always_dec(int *v)
decrement int, always atomic
static int nm_atomic_inc(int *v)
increment int, atomic only when multithread
static void nm_mem_fence_always(void)
memory fence, always
static int nm_atomic_always_compare_and_swap(int *v, int oldval, int newval)
boolean int compare and swap, always atomic
static void nm_mem_fence(void)
memory fence only when multithread
static void nm_atomic_add(int *v, int v2)
int add, atomic only when multithread
static int nm_atomic_alway_inc(int *v)
increment int, always atomic
static void nm_cond_init(nm_cond_status_t *p_cond, nm_status_t bitmask)
initialize a nm_cond_status_t object
static nm_status_t nm_cond_test(const nm_cond_status_t *p_cond, nm_status_t bitmask)
test whether the given bit is set in the status; unlocked, weak consistency
static void nm_cond_add(nm_cond_status_t *p_cond, nm_status_t bitmask)
add a bit to the bitmask in the status, do not unlock waiters (for bits that will not be waited for)
static void nm_cond_wait_all(void **pp_conds, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
wait on multiple statuses at the same time
static void nm_cond_signal(nm_cond_status_t *p_cond, nm_status_t bitmask)
add a bit and wake up threads waiting for it
static nm_status_t nm_cond_test_locked(const nm_cond_status_t *p_cond, nm_status_t bitmask)
test whether the given bit is set in the status; locked, guaranteed consistency, slower
static void nm_cond_wait(nm_cond_status_t *p_cond, nm_status_t bitmask, nm_core_t p_core)
wait for the given bit to be set in the status; do active polling while waiting
static void nm_cond_mask(nm_cond_status_t *p_cond, nm_status_t bitmask)
static void nm_cond_destroy(nm_cond_status_t *p_cond)
free resources associated with a nm_cond_status_t object
void nm_core_monitor_add(nm_core_t p_core, struct nm_core_monitor_s *m)
Register an event monitor.
void(* nm_core_event_notifier_t)(const struct nm_core_event_s *const event, void *ref)
an event notifier, fired upon status transition
struct nm_core_event_s __attribute__
Definition nm_data.h:530
void nm_core_req_monitor(struct nm_core *p_core, struct nm_req_s *p_req, struct nm_monitor_s monitor)
set a per-request monitor.
void nm_core_monitor_remove(nm_core_t p_core, struct nm_core_monitor_s *m)
Unregister an event monitor.
int nm_core_driver_load_init(nm_core_t p_core, puk_component_t driver, nm_trk_kind_t kind, nm_drv_t *pp_drv, const char **p_url)
puk_component_t nm_core_component_load(const char *entity, const char *name)
void nm_core_schedopt_disable(nm_core_t p_core)
disable schedopt for raw driver use
struct nm_core * nm_core_t
int nm_schedule(nm_core_t p_core)
nm_thread_level_e
struct nm_drv_s * nm_drv_t
a nmad driver; opaque type for the user
struct nm_core_internal_s nm_core_internal
int nm_core_exit(nm_core_t p_core)
nm_thread_level_t nm_core_get_thread_level(nm_core_t)
Get the current thread level.
enum nm_thread_level_e nm_thread_level_t
int nm_core_set_strategy(nm_core_t p_core, puk_component_t strategy)
PUK_VECT_TYPE(nm_drv, nm_drv_t)
nm_gate_t nm_core_gate_new(nm_core_t p_core, nm_drv_vect_t *p_drvs)
Init a new gate, using the given set of drivers.
void nm_trace_add_synchro_point(void)
generate a synchronization event to synchronize nmad traces with others (e.g.
void nm_core_set_thread_level(nm_thread_level_t)
Sets the thread level before nm core init.
int nm_core_init(nm_core_t *pp_core)
void nm_core_gate_connect_wait(nm_core_t p_core, struct nm_trk_s *p_trk)
wait for connection completion
void nm_core_gate_connect_async(nm_core_t p_core, nm_gate_t gate, nm_drv_t p_drv, nm_trk_id_t trk_id, const char *url)
start connection process on given gate/trk
@ NM_THREAD_FUNNELED
@ NM_THREAD_SINGLE
@ NM_THREAD_MULTIPLE
@ NM_THREAD_SERIALIZED
int nm_core_unpack_iprobe(struct nm_core *p_core, struct nm_req_s *p_unpack)
probes whether an incoming packet matched this unposted request.
void nm_core_inject_finalize(struct nm_core *p_core, struct nm_req_s *p_req)
finalize an injected request that was only completed.
int nm_core_unpack_cancel(struct nm_core *p_core, struct nm_req_s *p_unpack)
cancel a pending unpack
void nm_core_pack_data(nm_core_t p_core, struct nm_req_s *p_pack, const struct nm_data_s *p_data)
build a pack request from data descriptor
PUK_LIST_DECLARE_TYPE(nm_req_chunk)
void nm_core_unpack_init(struct nm_core *p_core, struct nm_req_s *p_unpack)
initializes an empty unpack request
int nm_core_unpack_peek(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_data_s *p_data, nm_len_t peek_offset, nm_len_t peek_len)
peeks unexpected data without consumming it.
int nm_core_unpack_partition_test(struct nm_req_s *p_unpack, int partition)
void nm_core_inject_chunk(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag, nm_seq_t seq, nm_len_t chunk_offset, nm_len_t chunk_len, int is_last_chunk, nm_injector_pull_data_t p_pull_data, void *p_ref)
inject a packet in nmad core as if it arrived from network.
int nm_core_iprobe(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag, nm_core_tag_t tag_mask, nm_gate_t *pp_out_gate, nm_core_tag_t *p_out_tag, nm_len_t *p_out_size)
probe unexpected packet, check matching for (packet_tag & tag_mask) == tag
void nm_core_pack_send(struct nm_core *p_core, struct nm_req_s *p_pack, nm_core_tag_t tag, nm_gate_t p_gate, nm_req_flag_t flags)
set tag/gate/flags for pack request
void(* nm_injector_pull_data_t)(struct nm_req_s *p_req, const struct nm_data_s *p_data, nm_len_t chunk_offset, nm_len_t chunk_len, void *p_ref)
user-supplied function called to pull data to posted request through nmad core p_req is the user requ...
void nm_core_unpack_offset(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_len_t offset)
set an offset on data; data before offset will be discarded
void nm_core_unpack_match_recv(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_gate_t p_gate, nm_core_tag_t tag, nm_core_tag_t tag_mask)
match an unpack request with given gate/tag, next sequence number assumed
void nm_core_pack_init(struct nm_core *p_core, struct nm_req_s *p_pack)
initializes an empty pack request
void nm_core_inject_complete_finalize(struct nm_core *p_core, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
notify data was injected in a matched request and finalize this request.
void nm_core_unpack_partition_set(struct nm_req_s *p_unpack, int n_partitions)
void nm_core_pack_submit(struct nm_core *p_core, struct nm_req_s *p_pack)
post a pack request
void nm_core_inject_complete(struct nm_core *p_core, struct nm_req_s *p_req, nm_len_t chunk_offset, nm_len_t chunk_len)
notify data was injected in a matched request, but do not finalize the request (the status will be NM...
void nm_core_unpack_data(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_data_s *p_data)
build an unpack request from data descriptor
void nm_core_flush(struct nm_core *p_core)
Flush pending packs (if supported by the strategy).
static void nm_core_pack_set_hlen(struct nm_core *p_core __attribute__((unused)), struct nm_req_s *p_pack, nm_len_t hlen)
set a header length for the given pack request
void nm_core_unpack_match_event(struct nm_core *p_core, struct nm_req_s *p_unpack, const struct nm_core_event_s *p_event)
match an unpack request with a packet that triggered an event
void nm_core_unpack_partition_free(struct nm_req_s *p_unpack)
void nm_core_unpack_submit(struct nm_core *p_core, struct nm_req_s *p_unpack, nm_req_flag_t flags)
submit an unpack request
void nm_core_pack_set_priority(struct nm_core *p_core, struct nm_req_s *p_pack, nm_prio_t priority)
set a priority for the given pack request
nm_seq_t nm_core_send_seq_get(struct nm_core *p_core, nm_gate_t p_gate, nm_core_tag_t tag)
get a seq number in the out stream, to route packet outside of nmad core
void nm_core_pack_submit_chunks(struct nm_core *p_core, struct nm_req_s *p_pack, int n, const struct nm_chunk_s *p_chunks)
static nm_session_hash_t nm_core_tag_get_hashcode(nm_core_tag_t core_tag)
uint32_t nm_session_hash_t
a session hashcode in tags, used to multiplex sessions
static nm_core_tag_t nm_core_tag_build(nm_session_hash_t hashcode, nm_tag_t tag)
static nm_tag_t nm_core_tag_get_tag(nm_core_tag_t core_tag)
nm_core_task_kind_e
enum nm_core_task_kind_e nm_core_task_kind_t
void nm_core_task_submit_unlocked(struct nm_core *p_core, void(*p_handler)(void))
submit task lock-free to the submission list. This is used mostly for benchmarks.
void nm_core_task_submit_locked(struct nm_core *p_core, void(*p_handler)(void))
lock then submit task to pending list. This is used mostly for benchmarks.
@ NM_CORE_TASK_COMPLETED_PREFETCH
prefetch completed; process RTR if received
@ NM_CORE_TASK_UNPACK_NEXT
try to match the next unpack on the given gate/tag/gtag
@ NM_CORE_TASK_RTR_SEND
send a RTR once the large pw for recv has been posted
@ NM_CORE_TASK_PACK_SUBMISSION
process a submitted pack request
@ NM_CORE_TASK_NONE
@ NM_CORE_TASK_NOP
a core task that does nothing (but is not invalid nor uninitialized)
@ NM_CORE_TASK_COMPLETED_PW
process a completed pw
@ NM_CORE_TASK_CANCELLED_PREFETCH
prefetch cancelled; release ref on pw
@ NM_CORE_TASK_HANDLER
call a user handler, mainly for testing/benchmarking
@ NM_CORE_TASK_INVALID
static void nm_spin_check_nothread(nm_spinlock_t *p_spin __attribute__((unused)))
check that we are always called from the same thread in case of non-threaded mode
static void nm_spin_init(nm_spinlock_t *p_spin)
init the spin lock
static void nm_spin_assert_notlocked(nm_spinlock_t *p_spin)
assert that current thread doesn't hold the lock
static int nm_spin_trylock(nm_spinlock_t *p_spin)
try to lock the spin lock return 1 if lock is successfully acquired, 0 otherwise
static void nm_spin_clear_nothread(nm_spinlock_t *p_spin __attribute__((unused)))
clear the last_tid tracking for lock consistency checking
static void nm_spin_destroy(nm_spinlock_t *p_spin)
destroy the spin lock
struct nm_spinlock_s nm_spinlock_t
static void nm_spin_lock(nm_spinlock_t *p_spin)
acquire the spin lock
static void nm_spin_assert_locked(nm_spinlock_t *p_spin)
assert that current thread holds the lock
static void nm_spin_unlock(nm_spinlock_t *p_spin)
release the spin lock
static void nm_status_signal(struct nm_req_s *p_req, nm_status_t bitmask)
add the bits from bitmask to the status and wakes up all others waiting on nm_status_wait().
static void nm_status_wait_all(void **pp_reqs, int n, uintptr_t offset, nm_status_t bitmask, nm_core_t p_core)
wait for all reqs, any bit in bitmask
static void nm_status_unset(struct nm_req_s *p_req, nm_status_t bitmask)
remove bits of bitmask from req status
static void nm_status_spinwait(struct nm_req_s *p_req, nm_status_t status)
static void nm_status_init(struct nm_req_s *p_req, nm_status_t bitmask)
initialize cond status with given initial value
static int nm_status_test_allbits(struct nm_req_s *p_req, nm_status_t bitmask)
tests for all given bits in status
static void nm_status_destroy(struct nm_req_s *p_req)
static void nm_status_add(struct nm_req_s *p_req, nm_status_t bitmask)
add a bit to the status of the request; does not unlock others (no signal)
static void nm_status_wait(struct nm_req_s *p_req, nm_status_t bitmask, nm_core_t p_core)
wait for any bit matching in req status
static nm_status_t nm_status_test(const struct nm_req_s *p_req, nm_status_t bitmask)
query for given bits in req status; returns matched bits
static void nm_status_assert(struct nm_req_s *p_req __attribute__((unused)), nm_status_t value __attribute__((unused)))
nm_status_t nm_cond_status_t
status with synchronization (wait/signal)
uint32_t nm_req_flag_t
pack/unpack flags
#define NM_STATUS_FINALIZED
request is finalized, may be freed
#define NM_STATUS_MASK_FULL
mask to catch all bits of status
uint32_t nm_status_t
status bits of pack/unpack requests
uint32_t nm_req_chunk_flag_t
flags for req_chunk
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details (mpif.h).
Definition mpif.h:19
static void nm_refcount_inc_internal(struct nm_refcount_s *p_refcount, const void *p_holder __attribute__((unused)), const char *func __attribute__((unused)), const char *file __attribute__((unused)), const int line __attribute__((unused)))
static void nm_refcount_init(struct nm_refcount_s *p_refcount, char *p_object_id, const void *p_init_holder)
initialize a new refcount object; get the object ID to make debugging easier; we take ownership of ob...
nm_session_hash_t hashcode
the session hashcode
PUK_LIST_TYPE(nm_refcount_holder, const void *p_holder;const char *func;const char *file;int line;)
elements for nm_refcount_s; a holder in the refcount
nm_tag_t tag
the user-supplied tag
static void nm_refcount_dump(struct nm_refcount_s *p_refcount __attribute__((unused)))
static void nm_refcount_destroy(struct nm_refcount_s *p_refcount __attribute__((unused)))
static int nm_refcount_dec(struct nm_refcount_s *p_refcount, const void *p_holder __attribute__((unused)))
decrement refcount for holder; returns refcount (if 0, caller may free ref-counted resource)
static int nm_refcount_get(struct nm_refcount_s *p_refcount)
assert(p_data->ops.p_traversal !=NULL)
nm_data_propertie_gpu_preinit & p_data
Definition nm_data.h:530
static nm_gate_t p_gate
nm_prio_t priority
Definition nm_headers.h:6
uint16_t hlen
length in header (header + data in header)
Definition nm_headers.h:5
nm_len_t chunk_len
length of this chunk
Definition nm_headers.h:4
nm_trk_id_t trk_id
index of the track relative to the gate
Definition nm_headers.h:3
nm_len_t chunk_offset
offset of the enclosed chunk
Definition nm_headers.h:4
nm_seq_t seq
sequence number
Definition nm_headers.h:2
Basic primitives to display info & warnings.
#define NM_FATAL(format,...)
Definition nm_log.h:36
#define NM_WARN(format,...)
Definition nm_log.h:34
nm_gate_t gate
gate of the destination or the source node
nm_mpi_count_t count
number of elements to be exchanged
nm_mpi_status_t status
status of request
nm_onesided_flag_t flags
This is the common public header for NewMad.
int8_t nm_trk_id_t
ID of a track, assigned in order.
Definition nm_types.h:86
uint64_t nm_req_seq_t
sequence number for requests
Definition nm_types.h:111
int32_t nm_prio_t
message priority
Definition nm_types.h:78
uint64_t nm_tag_t
user tags, 64 bits, contained in indirect hashtable
Definition nm_types.h:56
uint64_t nm_len_t
data length used by nmad
Definition nm_types.h:68
uint32_t nm_seq_t
Sequence number for packets on a given gate/tag.
Definition nm_types.h:102
uint8_t nm_proto_t
protocol flags- not part of the public API, but needed for inline
Definition nm_types.h:99
enum nm_trk_kind_e nm_trk_kind_t
nm_len_t chunk_offset
matching info for global monitors
nm_core_tag_t tag_mask
the mask to apply before comparing tags (only bits set in mask will be checked)
nm_core_tag_t tag
the tag to listen to
nm_gate_t p_gate
the gate to listen to, or NM_ANY_GATE for any
An event, generated by the NewMad core.
struct nm_req_s * p_req
the request that matched the event- NULL in case of unexpected packets
nm_status_t status
status flags- describe the event
exposed here for inlining; do not use this value, use the accessor nm_core_get_singleton()
global monitor for status transitions
struct nm_monitor_s monitor
the monitor to fire upon matching event
struct nm_core_event_matching_s matching
packet matching information
An internal tag.
nm_tag_t tag
the user-supplied tag
nm_session_hash_t hashcode
the session hashcode
asynchronous tasks for nmad core.
struct nm_req_s * p_unpack
struct nm_core_task_s::@7::@13 rtr_send
struct nm_core_task_s::@7::@9 completed_pw
struct nm_core_task_s::@7::@11 cancelled_prefetch
struct nm_core_task_s::@7::@14 handler
struct nm_core_task_s::@7::@12 pack_submission
void(* p_handler)(void)
enum nm_core_task_kind_e kind
union nm_core_task_s::@7 content
struct nm_core_task_s::@7::@8 unpack_next
struct nm_pkt_wrap_s * p_pw
struct nm_req_chunk_s * p_req_chunk
struct nm_core_task_s::@7::@10 completed_prefetch
struct nm_unexpected_s * p_unexpected
Core NewMadeleine structure.
Definition nm_core.h:43
block of static properties for a given data descriptor
Definition nm_data.h:93
a data descriptor, used to pack/unpack data from app layout to/from contiguous buffers
Definition nm_data.h:199
a driver.
Definition nm_drv.h:35
struct nm_core * p_core
Definition nm_drv.h:62
const char * url
driver url, as string
Definition nm_drv.h:55
const struct nm_minidriver_iface_s * driver
Driver interface, for use when no instance is needed.
Definition nm_drv.h:46
Connection to another process.
Definition nm_gate.h:104
status of tags on each gate
Definition nm_gate.h:27
containers for matching info, used for caching
struct nm_gtag_s * p_gtag
cache of gtag
struct nm_matching_gsession_s * p_gsession
cache of matching gsession
struct nm_matching_wildcard_s * p_wildcard
cache of matching wildcard
struct nm_matching_tag_s * p_matching_tag
cache of matching tag
struct to store matching info for any-source requests of a given tag
Definition nm_tags.h:146
struct to store matching info for wildcard requests, one per session
Definition nm_tags.h:126
generic monitor, used for requests and for global events (with matching)
nm_status_t event_mask
mask applied to status to check whether to fire events
nm_core_event_notifier_t p_notifier
notification function called to fire events
void * ref
opaque user-supplied pointer passed to notifier
Internal packet wrapper.
a reference-counter that keeps track of who increments/decrements; in debug: full reference tracking i...
int refcount
the counter itself
a chunk of request
PUK_LIST_LINK(nm_req_chunk)
nm_len_t chunk_offset
offset of the chunk relative to the full data in the req
struct nm_core_task_s core_task
nm_proto_t proto_flags
pre-computed proto flags
struct nm_data_properties_s chunk_props
properties of the data chunk
nm_len_t chunk_len
length of the chunk
struct nm_req_s * p_req
link to insert the req chunk as a core task
a generic pack/unpack request
nm_core_tag_t tag
tag to send to/from (works in combination with tag_mask for recv)
struct nm_req_s::@15::@18 unpack
struct nm_req_s::@15::@17 pack
nm_len_t expected_len
length of posted recv (may be updated if matched packet is shorter)
PUK_LIST_LINK(nm_req)
link to enqueue req in pending requests lists
nm_gate_t p_gate
dest/src gate; NULL if recv from any source
struct nm_matching_container_s matching
link to store request in a matching map
nm_len_t done
cumulated length of data sent so far
nm_prio_t priority
request priority level
uint32_t checksum
data checksum when pack was submitted- for debug only
struct nm_req_pchunk_s * p_next
struct nm_gtag_s * p_gtag
cache for tag status on gate; NULL if tag or gate is unspecified yet
nm_len_t offset
offset of data partially received
struct nm_req_s::@15::@18::@19::nm_req_pchunk_s * p_pchunks
unsorted list of arrived chunks; reads are lock-free, writes are within core_core_lock sections
nm_cond_status_t status
status, including status bits and synchronization
nm_req_seq_t req_seq
request sequence number used to interleave wildcard/non-wildcard requests
nm_len_t chunk_offset
struct nm_req_chunk_s req_chunk
preallocated chunk for the common case (single-chunk)
nm_req_flag_t flags
flags given by user
struct nm_data_s data
data descriptor to send/recv
struct nm_pkt_wrap_s * p_prefetch_pw
packet wrapper to prefetch recv
nm_core_tag_t tag_mask
mask applied to tag for matching (only bits in mask need to match)
nm_len_t cumulated_len
amount of data unpacked so far
struct nm_req_s::@15::@18::@19 partition
partitioned unpack, used only if NM_REQ_FLAG_UNPACK_PARTITIONED is set
nm_len_t len
cumulated data length
int err
error status of the request
struct nm_monitor_s monitor
monitor attached to this request (only 1)
nm_seq_t seq
packet sequence number on the given tag
nm_len_t chunk_len
nm_len_t hlen
length of header to send eagerly
a track on a given gate
Definition nm_gate.h:69
a chunk of unexpected message to be stored