Bug in the Hierarchical CLH Queue Lock from "The Art of Multiprocessor Programming", 1st edition


What am I asking for help with?

Suggestions for debugging or correcting the lock algorithm.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;

public class TestHCLHLock {
    static final int MAX_CLUSTERS = 8;
    public static class ThreadID {
        private static volatile int nextID = 0;
        private static ThreadLocalID threadID = new ThreadLocalID();
        public static int get() { return threadID.get();}
        public static void reset() { nextID = 0; }
        public static void set(int value) { threadID.set(value);}
        public static int getCluster(int n) {return threadID.get() % n;}
        private static class ThreadLocalID extends ThreadLocal<Integer> {
            protected synchronized Integer initialValue() { return nextID++; }}}

    static class QNode {
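        // The state word packs three fields into one int: tailWhenSpliced (bit 31),
        // successorMustWait (bit 30), and the owning cluster ID (low 30 bits).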
        private static final int TWS_MASK = 0x80000000;
        private static final int SMW_MASK = 0x40000000;
        private static final int CLUSTER_MASK = 0x3FFFFFFF;
        AtomicInteger state;
        public QNode() { state = new AtomicInteger(0); }

        boolean waitForGrantOrClusterMaster(int myCluster) {
            while (true) {
                if (getClusterID() == myCluster && !isTailWhenSpliced() && !isSuccessorMustWait()) {
                    return true;
                } else if (getClusterID() != myCluster || isTailWhenSpliced()) {
                    return false;
                } } }
                
        public void unlock() {
            int oldState = 0;
            int newState = ThreadID.getCluster(MAX_CLUSTERS) & CLUSTER_MASK;
            newState |= SMW_MASK;
            newState &= (~TWS_MASK);
            do {
                oldState = state.get();
            } while (!state.compareAndSet(oldState, newState)); }

        public int getClusterID() { return state.get() & CLUSTER_MASK; }

        public void setClusterID(int clusterID) {
            int oldState, newState;
            do {
                oldState = state.get();
                newState = (oldState & ~CLUSTER_MASK) | clusterID;
            } while (!state.compareAndSet(oldState, newState)); }

        public boolean isSuccessorMustWait() { return (state.get() & SMW_MASK) != 0; }

        public void setSuccessorMustWait(boolean successorMustWait) {
            int oldState, newState;
            do {
                oldState = state.get();
                if (successorMustWait) {
                    newState = oldState | SMW_MASK;
                } else {
                    newState = oldState & ~SMW_MASK;
                }
            } while (!state.compareAndSet(oldState, newState)); }

        public boolean isTailWhenSpliced() { return (state.get() & TWS_MASK) != 0; }

        public void setTailWhenSpliced(boolean tailWhenSpliced) {
            int oldState, newState;
            do {
                oldState = state.get();
                if (tailWhenSpliced) {
                    newState = oldState | TWS_MASK;
                } else {
                    newState = oldState & ~TWS_MASK;
                }
            } while (!state.compareAndSet(oldState, newState)); }}

    public class HCLHLock implements Lock {

        List<AtomicReference<QNode>> localQueues;
        AtomicReference<QNode> globalQueue;

        ThreadLocal<QNode> currNode = new ThreadLocal<QNode>() { protected QNode initialValue() { return new QNode(); } };
        ThreadLocal<QNode> preNode = new ThreadLocal<QNode>() { protected QNode initialValue() { return null; } };

        public HCLHLock() {
            localQueues = new ArrayList<AtomicReference<QNode>>(MAX_CLUSTERS);
            for (int i = 0; i < MAX_CLUSTERS; i++) {
                localQueues.add(new AtomicReference<QNode>());
            }
            QNode head = new QNode();
            globalQueue = new AtomicReference<QNode>(head); }

        public void lock() {
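            // Enqueue this node at the tail of the cluster-local queue. If the local
            // predecessor grants the lock, acquisition is done; otherwise this thread
            // becomes the cluster master and splices the local queue onto the global queue.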
            QNode myLocalNode = currNode.get();
            int myCluster = ThreadID.getCluster(MAX_CLUSTERS);
            myLocalNode.setClusterID(myCluster);
            AtomicReference<QNode> localQueue = localQueues.get(ThreadID.getCluster(MAX_CLUSTERS));
            QNode myLocalPred = null;
            QNode myGlobalPred = null;
            QNode localTail = null;
            do {
                myLocalPred = localQueue.get();
            } while (!localQueue.compareAndSet(myLocalPred, myLocalNode));

            if (myLocalPred != null) {
                boolean iOwnLock = myLocalPred.waitForGrantOrClusterMaster(myCluster);
                preNode.set(myLocalPred);
                if (iOwnLock) { return; }
            }
            do {
                myGlobalPred = globalQueue.get();
                localTail = localQueue.get();
            } while (!globalQueue.compareAndSet(myGlobalPred, localTail));

            localTail.setTailWhenSpliced(true);
            while (myGlobalPred.isSuccessorMustWait()) {
            }
            preNode.set(myGlobalPred); }

        public void unlock() {
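            // Release: clear successorMustWait on our own node so the next waiter can proceed,
            // then reset the predecessor's node and recycle it for this thread's next acquisition.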
            QNode myNode = currNode.get();
            myNode.setSuccessorMustWait(false);
            QNode myPred= preNode.get();
            if (myPred != null){
                myPred.unlock();
                currNode.set(myPred); } }

        public void lockInterruptibly() throws InterruptedException { throw new UnsupportedOperationException(); }
        public boolean tryLock() { throw new UnsupportedOperationException(); }
        public boolean tryLock(long time, TimeUnit unit) throws InterruptedException { throw new UnsupportedOperationException(); }
        public Condition newCondition() { throw new UnsupportedOperationException(); }
    }

    private final static int THREADS = 32;
    private final static int COUNT = 32 * 64;
    private final static int PER_THREAD = COUNT / THREADS;
    int counter = 0;
    HCLHLock instance = new HCLHLock();

    public  class MyThread extends Thread {
        public void run() {
            for (int i = 0; i < PER_THREAD; i++) {
                instance.lock();
                try {
                    counter = counter + 1;
                } finally {
                    instance.unlock();
                }}}}

    public  void test ()  throws Exception{
        Thread[] thread = new Thread[THREADS];
        for (int i = 0; i < THREADS; i++) {
            thread[i] = new MyThread();
        }
        for (int i = 0; i < THREADS; i++) {
            thread[i].start();
        }
        for (int i = 0; i < THREADS; i++) {
            thread[i].join();
        }
        System.out.println(String.format("expect %d, but %d", 2048, counter));
    }

    public static void main(String[] args) throws Exception{
         TestHCLHLock test = new TestHCLHLock();
         test.test(); }
}

If you are interested, you can take a look at https://github.com/jiamo/HCLHlock for the complete version. One strange behavior: adding a print statement inside isSuccessorMustWait makes the test pass easily (https://github.com/jiamo/HCLHlock/blob/main/src/main/java/HCLHLock.java#L177).
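For reference, that instrumented method presumably looks something like the sketch below (my reconstruction, not the exact code in the repo); the extra I/O presumably slows the spin and changes the interleaving, which is why it can mask the race rather than fix it:

    public boolean isSuccessorMustWait() {
        int s = state.get();
        System.out.printf("state = %x%n", s);   // side effect that perturbs timing
        return (s & SMW_MASK) != 0;
    }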

------------- merged the test code into one class ------------------

expect 2048, but 2047

The main function always prints output like the above, so the lock fails to protect the critical section. But the count is very close to 2048, which means the algorithm locks successfully most of the time.
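One sanity check (my suggestion, not from the original post): swap in a known-good lock such as java.util.concurrent.locks.ReentrantLock. If the count then always reaches 2048, the lost updates come from HCLHLock rather than from the test harness. A hypothetical ControlThread added next to MyThread inside TestHCLHLock:

    // Control experiment using a standard lock in place of HCLHLock.
    java.util.concurrent.locks.ReentrantLock control = new java.util.concurrent.locks.ReentrantLock();

    public class ControlThread extends Thread {
        public void run() {
            for (int i = 0; i < PER_THREAD; i++) {
                control.lock();
                try {
                    counter = counter + 1;
                } finally {
                    control.unlock();
                }}}}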

------------- a stronger version, but still wrong -----------

If iOwnLock is true, wait on the predecessor again before returning:

-                 preNode.set(myLocalPred);
-                 if (iOwnLock) { return; }
-             }
+                 if (iOwnLock) {
+                     while (myLocalPred.isSuccessorMustWait()) { }
+                     preNode.set(myLocalPred);
+                     return;
+                 } }
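After applying that diff, the local-queue branch of lock() reads roughly as follows; as noted above, the test still fails with it:

            if (myLocalPred != null) {
                boolean iOwnLock = myLocalPred.waitForGrantOrClusterMaster(myCluster);
                if (iOwnLock) {
                    // extra guard: also wait until the predecessor clears successorMustWait
                    while (myLocalPred.isSuccessorMustWait()) { }
                    preNode.set(myLocalPred);
                    return;
                } }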

------------- as tested by @mevets -----------

This algorithm also deadlocks intermittently.
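One way to catch that hang in the harness (my sketch, not from the post) is to join with a timeout in test() and dump the stack of any thread that is still alive:

        for (int i = 0; i < THREADS; i++) {
            thread[i].join(10_000);                       // wait at most 10 s per thread
            if (thread[i].isAlive()) {
                System.out.println("deadlock suspected in " + thread[i].getName());
                for (StackTraceElement e : thread[i].getStackTrace()) {
                    System.out.println("    at " + e);
                }
            }
        }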

java
algorithm
concurrency
locking
lock-free
asked on Stack Overflow Jan 29, 2021 by jiamo • edited Feb 6, 2021 by jiamo

1 Answer


I had some slightly different results:

for i in 1 2 3 4 5 6 7 8 9 10; do java TestHCLHLock; done
expect 2048, but 2044
expect 2048, but 2042
expect 2048, but 2016
expect 2048, but 2042
expect 2048, but 2018
expect 2048, but 2004
expect 2048, but 2043
expect 2048, but 2024

Notice that only eight of the ten runs finished; the next one never completed and was still spinning madly. So I put some counters in the busy-waiting loops and printed a message:

state = c0000001
state = c0000005
state = c0000005
state = c0000007
state = c0000005
state = c0000000
state = c0000004
...

This output is from printing the value of state while the loop at line 170 spins too many times. I hope this helps. I'm not a Java person, but I presume there are debuggers and such.
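A rough reconstruction of that instrumentation (the answer does not show the exact code), assuming the loop in question is the busy-wait in waitForGrantOrClusterMaster:

        boolean waitForGrantOrClusterMaster(int myCluster) {
            long spins = 0;
            while (true) {
                if (getClusterID() == myCluster && !isTailWhenSpliced() && !isSuccessorMustWait()) {
                    return true;
                } else if (getClusterID() != myCluster || isTailWhenSpliced()) {
                    return false;
                }
                // hypothetical counter: if the loop runs "too long", dump the raw state word
                if (++spins % 100_000_000 == 0) {
                    System.out.printf("state = %x%n", state.get());   // e.g. "state = c0000005"
                }
            }
        }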

This was on a Linux machine with 20 cores + 20 hardware threads, I believe a 2x(10+10) configuration.

answered on Stack Overflow Feb 5, 2021 by mevets

User contributions licensed under CC BY-SA 3.0