RWMutex.cpp
//===- RWMutex.cpp - Reader/Writer Mutual Exclusion Lock --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the llvm::sys::RWMutex class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/Allocator.h"
#include "llvm/Support/RWMutex.h"
#include "llvm/Config/config.h"

#if defined(LLVM_USE_RW_MUTEX_IMPL)
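// Note: this hand-rolled implementation is only compiled when RWMutex.h
// defines LLVM_USE_RW_MUTEX_IMPL, i.e. when the std::shared_mutex-based
// default is unavailable (e.g. macOS deployment targets older than 10.12).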
using namespace llvm;
using namespace sys;

#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
// Define all methods as no-ops if threading is explicitly disabled

RWMutexImpl::RWMutexImpl() = default;
RWMutexImpl::~RWMutexImpl() = default;

bool RWMutexImpl::lock_shared() { return true; }
bool RWMutexImpl::unlock_shared() { return true; }
bool RWMutexImpl::lock() { return true; }
bool RWMutexImpl::unlock() { return true; }

#else

#if defined(HAVE_PTHREAD_H) && defined(HAVE_PTHREAD_RWLOCK_INIT)

#include <cassert>
#include <cstdlib>
#include <pthread.h>

// Construct a RWMutex using pthread calls
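// The pthread_rwlock_t is heap-allocated and stored through the opaque
// void *data_ member, so <pthread.h> types never leak into the public
// RWMutex.h header.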
RWMutexImpl::RWMutexImpl()
{
  // Allocate the pthread_rwlock data structure
  pthread_rwlock_t* rwlock =
    static_cast<pthread_rwlock_t*>(safe_malloc(sizeof(pthread_rwlock_t)));

#ifdef __APPLE__
  // Work around a bug/mis-feature in Darwin's pthread_rwlock_init.
  bzero(rwlock, sizeof(pthread_rwlock_t));
#endif

  // Initialize the rwlock
  int errorcode = pthread_rwlock_init(rwlock, nullptr);
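  // Cast to void to avoid an unused-variable warning in NDEBUG builds,
  // where the assert below compiles away.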
  (void)errorcode;
  assert(errorcode == 0);

  // Assign the data member
  data_ = rwlock;
}

// Destroy an RWMutex
RWMutexImpl::~RWMutexImpl()
{
  pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
  assert(rwlock != nullptr);
  pthread_rwlock_destroy(rwlock);
  free(rwlock);
}

bool
RWMutexImpl::lock_shared()
{
  pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
  assert(rwlock != nullptr);

  int errorcode = pthread_rwlock_rdlock(rwlock);
  return errorcode == 0;
}

bool
RWMutexImpl::unlock_shared()
{
  pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
  assert(rwlock != nullptr);

  int errorcode = pthread_rwlock_unlock(rwlock);
  return errorcode == 0;
}

bool
RWMutexImpl::lock()
{
  pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
  assert(rwlock != nullptr);

  int errorcode = pthread_rwlock_wrlock(rwlock);
  return errorcode == 0;
}

bool
RWMutexImpl::unlock()
{
  pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
  assert(rwlock != nullptr);

  int errorcode = pthread_rwlock_unlock(rwlock);
  return errorcode == 0;
}

#else
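// Generic fallback: emulate the reader/writer lock with a plain mutex.
// Readers are serialized in this configuration, but the locking semantics
// remain correct.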

RWMutexImpl::RWMutexImpl() : data_(new MutexImpl(false)) { }

RWMutexImpl::~RWMutexImpl() {
  delete static_cast<MutexImpl *>(data_);
}

bool RWMutexImpl::lock_shared() {
  return static_cast<MutexImpl *>(data_)->acquire();
}

bool RWMutexImpl::unlock_shared() {
  return static_cast<MutexImpl *>(data_)->release();
}

bool RWMutexImpl::lock() {
  return static_cast<MutexImpl *>(data_)->acquire();
}

bool RWMutexImpl::unlock() {
  return static_cast<MutexImpl *>(data_)->release();
}

#endif
#endif
#endif
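
A minimal usage sketch (not part of the file above): callers normally go
through the SmartRWMutex / SmartScopedReader / SmartScopedWriter wrappers
declared in llvm/Support/RWMutex.h rather than touching RWMutexImpl directly.
The cache names below are hypothetical.

#include "llvm/Support/RWMutex.h"
#include <map>
#include <string>

namespace {
// Hypothetical shared table guarded by a reader/writer lock.
llvm::sys::SmartRWMutex<true> CacheLock;
std::map<std::string, std::string> StringCache;
} // namespace

// Readers take the lock in shared mode; with the pthread backend several
// readers can hold it at once.
bool lookup(const std::string &Key, std::string &Out) {
  llvm::sys::SmartScopedReader<true> Guard(CacheLock);
  auto It = StringCache.find(Key);
  if (It == StringCache.end())
    return false;
  Out = It->second;
  return true;
}

// Writers take the lock exclusively.
void insert(const std::string &Key, const std::string &Value) {
  llvm::sys::SmartScopedWriter<true> Guard(CacheLock);
  StringCache[Key] = Value;
}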