ReadWriteMutex

This class represents a mutex that allows any number of readers to enter, but when a writer enters, all other readers and writers are blocked.

Please note that this mutex is not recursive and is intended to guard access to data only. Also, no deadlock checking is in place because doing so would require dynamic memory allocation, which would reduce performance by an unacceptable amount. As a result, any attempt to recursively acquire this mutex may well deadlock the caller, particularly if a write lock is acquired while holding a read lock, or vice versa. In practice, however, this should not be an issue, because it is uncommon to call deeply into unknown code while holding a lock that simply protects data.

Constructors

this
this(Policy policy)

Initializes a read/write mutex object with the supplied policy.

Members

Classes

Reader
class Reader

This class can be considered a mutex in its own right, and is used to negotiate a read lock for the enclosing mutex.

Writer
class Writer

This class can be considered a mutex in its own right, and is used to negotiate a write lock for the enclosing mutex.

Enums

Policy
enum Policy

Defines the policy used by this mutex. Currently, two policies are defined.

Properties

policy
Policy policy [@property getter]

Gets the policy used by this mutex.

reader
Reader reader [@property getter]
shared(Reader) reader [@property getter]

Gets an object representing the reader lock for the associated mutex.

writer
Writer writer [@property getter]
shared(Writer) writer [@property getter]

Gets an object representing the writer lock for the associated mutex.

Examples

/////////////////////////////////////////////////////////////////////////////

1 import core.atomic, core.thread, core.sync.semaphore;
2 
3 static void runTest(ReadWriteMutex.Policy policy)  // exercises reader/writer exclusion and queue ordering under the given policy
4 {
5     scope mutex = new ReadWriteMutex(policy);  // the mutex under test
6     scope rdSemA = new Semaphore, rdSemB = new Semaphore,  // *SemA: worker signals "lock acquired"
7           wrSemA = new Semaphore, wrSemB = new Semaphore;  // *SemB: test thread releases the lock holder
8     shared size_t numReaders, numWriters;  // threads currently inside the read / write lock
9 
10     void readerFn()  // acquire the read lock, report, then hold it until released via rdSemB
11     {
12         synchronized (mutex.reader)
13         {
14             atomicOp!"+="(numReaders, 1);
15             rdSemA.notify();  // signal: this reader is inside the lock
16             rdSemB.wait();  // hold the lock until the test thread says go
17             atomicOp!"-="(numReaders, 1);
18         }
19     }
20 
21     void writerFn()  // acquire the write lock, report, then hold it until released via wrSemB
22     {
23         synchronized (mutex.writer)
24         {
25             atomicOp!"+="(numWriters, 1);
26             wrSemA.notify();  // signal: this writer is inside the lock
27             wrSemB.wait();  // hold the lock until the test thread says go
28             atomicOp!"-="(numWriters, 1);
29         }
30     }
31 
32     void waitQueued(size_t queuedReaders, size_t queuedWriters)  // spin until the mutex reports exactly these queue lengths
33     {
34         for (;;)
35         {
36             synchronized (mutex.m_commonMutex)  // NOTE: peeks private state; only valid from within the defining module
37             {
38                 if (mutex.m_numQueuedReaders == queuedReaders &&
39                     mutex.m_numQueuedWriters == queuedWriters)
40                     break;
41             }
42             Thread.yield();  // avoid busy-burning the CPU while waiting
43         }
44     }
45 
46     scope group = new ThreadGroup;
47 
48     // 2 simultaneous readers
49     group.create(&readerFn); group.create(&readerFn);
50     rdSemA.wait(); rdSemA.wait();  // both readers hold the read lock at the same time
51     assert(numReaders == 2);
52     rdSemB.notify(); rdSemB.notify();
53     group.joinAll();
54     assert(numReaders == 0);
55     foreach (t; group) group.remove(t);  // reset the group for the next scenario
56 
57     // 1 writer at a time
58     group.create(&writerFn); group.create(&writerFn);
59     wrSemA.wait();
60     assert(!wrSemA.tryWait());  // the second writer must still be blocked
61     assert(numWriters == 1);
62     wrSemB.notify();  // release the first writer; the second may now enter
63     wrSemA.wait();
64     assert(numWriters == 1);
65     wrSemB.notify();
66     group.joinAll();
67     assert(numWriters == 0);
68     foreach (t; group) group.remove(t);
69 
70     // reader and writer are mutually exclusive
71     group.create(&readerFn);
72     rdSemA.wait();  // reader is inside the lock
73     group.create(&writerFn);
74     waitQueued(0, 1);  // writer is queued behind the active reader
75     assert(!wrSemA.tryWait());  // writer must not have entered yet
76     assert(numReaders == 1 && numWriters == 0);
77     rdSemB.notify();  // release the reader; writer may now enter
78     wrSemA.wait();
79     assert(numReaders == 0 && numWriters == 1);
80     wrSemB.notify();
81     group.joinAll();
82     assert(numReaders == 0 && numWriters == 0);
83     foreach (t; group) group.remove(t);
84 
85     // writer and reader are mutually exclusive
86     group.create(&writerFn);
87     wrSemA.wait();  // writer is inside the lock
88     group.create(&readerFn);
89     waitQueued(1, 0);  // reader is queued behind the active writer
90     assert(!rdSemA.tryWait());  // reader must not have entered yet
91     assert(numReaders == 0 && numWriters == 1);
92     wrSemB.notify();  // release the writer; reader may now enter
93     rdSemA.wait();
94     assert(numReaders == 1 && numWriters == 0);
95     rdSemB.notify();
96     group.joinAll();
97     assert(numReaders == 0 && numWriters == 0);
98     foreach (t; group) group.remove(t);
99 
100     // policy determines whether queued reader or writers progress first
101     group.create(&writerFn);
102     wrSemA.wait();  // writer holds the lock; queue one reader and one writer behind it
103     group.create(&readerFn);
104     group.create(&writerFn);
105     waitQueued(1, 1);
106     assert(numReaders == 0 && numWriters == 1);
107     wrSemB.notify();  // release the active writer; the policy decides who goes next
108 
109     if (policy == ReadWriteMutex.Policy.PREFER_READERS)
110     {
111         rdSemA.wait();  // the queued reader wins the race
112         assert(numReaders == 1 && numWriters == 0);
113         rdSemB.notify();
114         wrSemA.wait();
115         assert(numReaders == 0 && numWriters == 1);
116         wrSemB.notify();
117     }
118     else if (policy == ReadWriteMutex.Policy.PREFER_WRITERS)
119     {
120         wrSemA.wait();  // the queued writer wins the race
121         assert(numReaders == 0 && numWriters == 1);
122         wrSemB.notify();
123         rdSemA.wait();
124         assert(numReaders == 1 && numWriters == 0);
125         rdSemB.notify();
126     }
127     group.joinAll();
128     assert(numReaders == 0 && numWriters == 0);
129     foreach (t; group) group.remove(t);
130 }
131 runTest(ReadWriteMutex.Policy.PREFER_READERS);  // run the full scenario once per policy
132 runTest(ReadWriteMutex.Policy.PREFER_WRITERS);

Meta