Diffstat (limited to 'runtime')
-rw-r--r--  runtime/atomic.h   23
-rw-r--r--  runtime/debug.c    16
-rw-r--r--  runtime/debug.h     3
-rw-r--r--  runtime/msg.c       4
-rw-r--r--  runtime/queue.c    12
5 files changed, 42 insertions(+), 16 deletions(-)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 430ae7f0..2dbe7f52 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -1,6 +1,6 @@
/* This header supplies atomic operations. So far, we rely on GCC's
- * atomic builtins. I have no idea if we can check them via autotools,
- * but I am making the necessary provisioning to live without them if
+ * atomic builtins. During configure, we check whether atomic operations are
+ * available, and we make the necessary provisioning to live without them if
* they are not available. Please note that you should only use the macros
* here if you think you can actually live WITHOUT an explicit atomic operation,
 * because in their absence, we simply do it without atomicity.
@@ -36,16 +36,21 @@
#ifndef INCLUDED_ATOMIC_H
#define INCLUDED_ATOMIC_H
-/* set the following to 1 if we have atomic operations (and #undef it otherwise) */
-/* #define DO_HAVE_ATOMICS 1 */
/* for this release, we disable atomic calls because there seem to be some
* portability problems and we can not fix that without destabilizing the build.
* They simply came in too late. -- rgerhards, 2008-04-02
*/
-/* make sure they are not used!
-#define ATOMIC_INC(data) ((void) __sync_fetch_and_add(&data, 1))
-#define ATOMIC_DEC_AND_FETCH(data) __sync_sub_and_fetch(&data, 1)
-*/
-#define ATOMIC_INC(data) (++(data))
+#ifdef HAVE_ATOMIC_BUILTINS
+# define ATOMIC_INC(data) ((void) __sync_fetch_and_add(&(data), 1))
+# define ATOMIC_DEC_AND_FETCH(data) __sync_sub_and_fetch(&(data), 1)
+# define ATOMIC_FETCH_32BIT(data) ((unsigned) __sync_fetch_and_and(&(data), 0xffffffff))
+# define ATOMIC_STORE_1_TO_32BIT(data) __sync_lock_test_and_set(&(data), 1)
+#else
+# warning "atomic builtins not available, using non-atomic replacements"
+# define ATOMIC_INC(data) (++(data))
+# define ATOMIC_DEC_AND_FETCH(data) (--(data))
+# define ATOMIC_FETCH_32BIT(data) (data)
+# define ATOMIC_STORE_1_TO_32BIT(data) ((data) = 1)
+#endif
#endif /* #ifndef INCLUDED_ATOMIC_H */
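To make the intended use of these macros concrete, here is a minimal, self-contained sketch of a reference counter built on them. Only the ATOMIC_* names come from atomic.h; obj_t, obj_retain() and obj_release() are invented for this example.

#include <stdlib.h>
#include "atomic.h"

typedef struct obj {
	int iRefCount;
} obj_t;

static void obj_retain(obj_t *pThis)
{
	/* atomic increment when builtins are available, plain ++ otherwise */
	ATOMIC_INC(pThis->iRefCount);
}

static void obj_release(obj_t *pThis)
{
	/* DEC_AND_FETCH returns the new value, so exactly one caller sees 0 */
	if(ATOMIC_DEC_AND_FETCH(pThis->iRefCount) == 0)
		free(pThis);
}

int main(void)
{
	obj_t *pObj = calloc(1, sizeof(obj_t));
	pObj->iRefCount = 1;
	obj_retain(pObj);	/* refcount 2 */
	obj_release(pObj);	/* refcount 1 */
	obj_release(pObj);	/* refcount 0: freed */
	return 0;
}

In the fallback case the same code compiles, but callers must serialize access by other means (typically a mutex), which is exactly the caveat the header comment spells out.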
diff --git a/runtime/debug.c b/runtime/debug.c
index 1450d029..7ed1442b 100644
--- a/runtime/debug.c
+++ b/runtime/debug.c
@@ -480,7 +480,23 @@ static inline void dbgMutexUnlockLog(pthread_mutex_t *pmut, dbgFuncDB_t *pFuncDB
pthread_mutex_lock(&mutMutLog);
pLog = dbgMutLogFindSpecific(pmut, MUTOP_LOCK, NULL);
+#if 0 /* toggle for testing */
	assert(pLog != NULL);
+#else
+	/* the change below does not seem to work - the problem looks like a real race... I keep this code
+	 * in just in case I need to re-use it. It should be removed once we have finished analyzing this problem. -- rgerhards, 2008-09-17
+	 */
+	if(pLog == NULL) {
+		/* this may happen due to some races. We do not try to avoid
+		 * this, as it would complicate the "real" code. This is not justified
+		 * just to keep the debug info system up. -- rgerhards, 2008-09-17
+		 */
+		pthread_mutex_unlock(&mutMutLog);
+		dbgprintf("%s:%d:%s: mutex %p UNlocked [but we did not yet know this mutex!]\n",
+			pFuncDB->file, unlockLn, pFuncDB->func, (void*)pmut);
+		return;	/* if we do not know it yet, we cannot clean up... */
+	}
+#endif
/* we found the last lock entry. We now need to see from which FuncDB we need to
* remove it. This is recorded inside the mutex log entry.
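The race the comment alludes to is plausible because the real pthread_mutex_unlock() and the mutex-log bookkeeping are separate steps, so the log can be momentarily out of sync. The defensive shape of the new code, reduced to a compilable sketch: logentry_t and findLogEntry() are invented stand-ins for the mutex-log record and dbgMutLogFindSpecific(), and the forced NULL return simulates the race.

#include <stdio.h>
#include <pthread.h>

typedef struct logentry { pthread_mutex_t *pmut; } logentry_t;

static logentry_t *findLogEntry(pthread_mutex_t *pmut)
{
	(void) pmut;
	return NULL;	/* simulate the race: lock entry not (yet) recorded */
}

static void unlockAndLog(pthread_mutex_t *pmut)
{
	logentry_t *pLog = findLogEntry(pmut);
	if(pLog == NULL) {
		/* tolerate the missing entry instead of assert()ing - the
		 * debug layer must not take the whole daemon down */
		fprintf(stderr, "mutex %p unlocked, but not in log\n", (void*)pmut);
		pthread_mutex_unlock(pmut);
		return;
	}
	/* normal path: remove pLog from its FuncDB, then unlock */
	pthread_mutex_unlock(pmut);
}

int main(void)
{
	pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_lock(&mut);
	unlockAndLog(&mut);
	return 0;
}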
diff --git a/runtime/debug.h b/runtime/debug.h
index 214b7c05..d9d576b5 100644
--- a/runtime/debug.h
+++ b/runtime/debug.h
@@ -130,7 +130,8 @@ void dbgPrintAllDebugInfo(void);
/* debug aides */
-#ifdef RTINST
+//#ifdef RTINST
+#if 0 // temporarily removed for helgrind
#define d_pthread_mutex_lock(x) dbgMutexLock(x, pdbgFuncDB, __LINE__, dbgCALLStaCK_POP_POINT )
#define d_pthread_mutex_unlock(x) dbgMutexUnlock(x, pdbgFuncDB, __LINE__, dbgCALLStaCK_POP_POINT )
#define d_pthread_cond_wait(cond, mut) dbgCondWait(cond, mut, pdbgFuncDB, __LINE__, dbgCALLStaCK_POP_POINT )
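The #else branch of this conditional lies outside the hunk; presumably it maps the d_* wrappers straight onto the pthread primitives, so code keeps compiling with the instrumented path switched off for helgrind. A hypothetical sketch of such a fallback:

/* hypothetical fallback - the real #else branch is not shown in this hunk */
#define d_pthread_mutex_lock(x)        pthread_mutex_lock(x)
#define d_pthread_mutex_unlock(x)      pthread_mutex_unlock(x)
#define d_pthread_cond_wait(cond, mut) pthread_cond_wait(cond, mut)

With plain mappings, helgrind observes the genuine pthread calls rather than the wrapper bookkeeping, which appears to be the point of the temporary switch.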
diff --git a/runtime/msg.c b/runtime/msg.c
index f4eb9414..346bbc5f 100644
--- a/runtime/msg.c
+++ b/runtime/msg.c
@@ -276,8 +276,10 @@ CODESTARTobjDestruct(msg)
# ifdef DO_HAVE_ATOMICS
currRefCount = ATOMIC_DEC_AND_FETCH(pThis->iRefCount);
# else
+ MsgLock(pThis);
currRefCount = --pThis->iRefCount;
# endif
+// we need the mutex because we may be suspended after getting the refcount but before acting on it
if(currRefCount == 0)
{
/* DEV Debugging Only! dbgprintf("msgDestruct\t0x%lx, RefCount now 0, doing DESTROY\n", (unsigned long)pThis); */
@@ -337,9 +339,11 @@ CODESTARTobjDestruct(msg)
rsCStrDestruct(&pThis->pCSPROCID);
if(pThis->pCSMSGID != NULL)
rsCStrDestruct(&pThis->pCSMSGID);
+ MsgUnlock(pThis);
funcDeleteMutex(pThis);
} else {
+	MsgUnlock(pThis);
	pThis = NULL; /* tell framework not to destruct the object! */
}
ENDobjDestruct(msg)
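One detail worth noting: atomic.h now keys its macros on HAVE_ATOMIC_BUILTINS, while this file still tests DO_HAVE_ATOMICS, so the mutex branch is the one actually compiled here. The pattern it implements is sketched below in self-contained form; msg_t is reduced to the two fields involved, and MsgLock()/MsgUnlock()/funcDeleteMutex() are modeled by direct pthread calls.

#include <pthread.h>
#include <stdlib.h>

typedef struct msg {
	pthread_mutex_t mut;
	int iRefCount;
} msg_t;

static void msgDestruct(msg_t **ppThis)
{
	msg_t *pThis = *ppThis;
	int currRefCount;

	pthread_mutex_lock(&pThis->mut);		/* MsgLock() */
	currRefCount = --pThis->iRefCount;
	if(currRefCount == 0) {
		/* last reference: release the lock, then destroy it and the object */
		pthread_mutex_unlock(&pThis->mut);	/* MsgUnlock() */
		pthread_mutex_destroy(&pThis->mut);	/* funcDeleteMutex() */
		free(pThis);
	} else {
		/* unlock BEFORE clearing the pointer: unlocking through a
		 * NULL pointer would crash */
		pthread_mutex_unlock(&pThis->mut);	/* MsgUnlock() */
		*ppThis = NULL;	/* tell the caller not to destruct the object */
	}
}

int main(void)
{
	msg_t *pMsg = calloc(1, sizeof(msg_t));
	pthread_mutex_init(&pMsg->mut, NULL);
	pMsg->iRefCount = 1;
	msgDestruct(&pMsg);	/* refcount drops to 0: object is freed */
	return 0;
}

The else branch shows why the unlock has to precede the NULL assignment: MsgUnlock() dereferences the object to reach its mutex.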
diff --git a/runtime/queue.c b/runtime/queue.c
index 7e7d4152..c0a37019 100644
--- a/runtime/queue.c
+++ b/runtime/queue.c
@@ -2171,17 +2171,17 @@ queueEnqObj(queue_t *pThis, flowControl_t flowCtlType, void *pUsr)
finalize_it:
if(pThis->qType != QUEUETYPE_DIRECT) {
- d_pthread_mutex_unlock(pThis->mut);
+		/* make sure at least one worker is running */
+		queueAdviseMaxWorkers(pThis);
+		/* signal the arrival of new data and release the mutex last;
+		 * we are already inside the non-DIRECT branch here, so no
+		 * further queue-type check is needed */
	i = pthread_cond_signal(&pThis->notEmpty);
+		d_pthread_mutex_unlock(pThis->mut);
dbgoprint((obj_t*) pThis, "EnqueueMsg signaled condition (%d)\n", i);
pthread_setcancelstate(iCancelStateSave, NULL);
}
- /* make sure at least one worker is running. */
- if(pThis->qType != QUEUETYPE_DIRECT) {
- queueAdviseMaxWorkers(pThis);
- }
-
RETiRet;
}
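The net effect of this hunk is an ordering change: queueAdviseMaxWorkers() and the condition signal now run while the queue mutex is still held, and the unlock comes last. A reduced sketch of the resulting producer side; queue_t is cut down to the fields used here and adviseMaxWorkers() is an empty stand-in for queueAdviseMaxWorkers().

#include <pthread.h>

typedef struct queue {
	pthread_mutex_t mut;
	pthread_cond_t  notEmpty;
	int             nElem;
} queue_t;

static void adviseMaxWorkers(queue_t *pThis)
{
	(void) pThis;	/* would start a worker thread if too few are active */
}

static void enqueue(queue_t *pThis)
{
	pthread_mutex_lock(&pThis->mut);
	pThis->nElem++;				/* the actual enqueue */
	adviseMaxWorkers(pThis);		/* needs the queue state stable */
	pthread_cond_signal(&pThis->notEmpty);	/* wake a sleeping worker */
	pthread_mutex_unlock(&pThis->mut);	/* release last */
}

int main(void)
{
	queue_t q;
	pthread_mutex_init(&q.mut, NULL);
	pthread_cond_init(&q.notEmpty, NULL);
	q.nElem = 0;
	enqueue(&q);
	return 0;
}

Signaling while the mutex is held is legal with POSIX condition variables; a woken waiter simply blocks on the mutex until the producer releases it.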