Lock free stack benchmark

This commit is contained in:
alex 2013-03-26 15:46:32 -07:00
parent c325e5aae4
commit 1c1c47cd13
15 changed files with 2701 additions and 4 deletions

View File

@ -1,4 +0,0 @@
lockfree-bench
==============
Benchmarks for lock-free data structures.

8
stack/README.md Normal file
View File

@ -0,0 +1,8 @@
lockfree-bench
==============
Lock free stack benchmark. Use
g++ -std=c++0x benchmark.cpp -o benchmark -lpthread -O2
to compile.

7
stack/atomic.h Normal file
View File

@ -0,0 +1,7 @@
// A placeholder for <atomic> until we upgrade gcc to a version that
// is more sensible with regard to memory fences
//
#pragma once
#include "boost/atomic.hpp"

BIN
stack/benchmark Executable file

Binary file not shown.

116
stack/benchmark.cpp Normal file
View File

@ -0,0 +1,116 @@
#define MAX_THREADS 32
// C and POSIX headers
#include <sys/time.h>
#include <unistd.h>   // sleep()
// C++ standard library
#include <cassert>
#include <cstdio>     // printf()
#include <cstdlib>
#include <thread>
#include <vector>
// Project headers
#include "locked.h"
#include "lockfree.h"
// Element type for the LockedStack benchmark.  `data` is the ownership
// flag used by Worker: 0 while privately held by a thread, 1 while the
// element is on the shared stack.
struct LockedElement
{
int data;
};
// Element type for the LockFreeStack benchmark: inherits the intrusive
// Node link and carries the same 0/1 ownership flag that Worker's
// asserts check.
struct LockFreeElement: public LockFreeStack::Node
{
int data;
};
// Global start/stop switch flipped by Test(): workers spin until it
// becomes true and run until it becomes false again.
boost::atomic<bool> running;
// One benchmark thread.  Each worker owns a private pool (`mine`) of
// numElements elements; on every iteration it flips a coin and either
// pushes one of its own elements onto the shared stack or pops one
// (possibly another thread's) back into its pool.  `data` acts as an
// ownership flag (0 = privately held, 1 = on the stack) so the asserts
// catch lost or duplicated elements.  The attempt count is written to
// *numOps once `running` goes false.
template<class Stack, class Element>
void Worker(Stack& st, Element* elems, int numElements, int* numOps, int threadId)
{
// Thread-private RNG state for rand_r.  NOTE(review): the seeding
// rand() call runs concurrently in every worker -- harmless for a
// benchmark, but the seeds are not guaranteed independent.
unsigned int seed = rand();
std::vector<Element*> mine;
int ops = 0;
for(int i=0; i<numElements; i++)
{
mine.push_back(&elems[i]);
elems[i].data = 0;
}
// Spin until Test() opens the measurement window.
while(!running.load(boost::memory_order_acquire)){}
while(running.load(boost::memory_order_acquire))
{
Element* elem;
switch(rand_r(&seed)&1)
{
case 0:
// Push one of our own elements, if any remain.
if(mine.size())
{
elem = mine.back();
assert(elem->data == 0);
elem->data = 1;
mine.pop_back();
st.Push(elem);
}
// Counted even when the pool was empty: ops measures attempts.
ops++;
break;
case 1:
// Pop; nullptr means the stack was empty at that attempt.
elem = static_cast<Element*>(st.Pop(threadId));
if(elem != nullptr)
{
assert(elem->data == 1);
elem->data = 0;
mine.push_back(elem);
}
ops++;
break;
}
}
*numOps = ops;
}
// Measures throughput (operations per second, summed over all threads)
// of one stack implementation at a given thread count.
//
// Runs test_iterations independent rounds; each round constructs a
// fresh stack, hands each of the nthreads workers its own slice of
// elem_per_thread elements, lets them run for test_time seconds, then
// joins them and accumulates their per-thread op counts.
//
// Element must be the element type Worker pushes onto / pops from
// Stack.  Returns the averaged operations-per-second figure.
template<class Stack, class Element>
double Test(int nthreads)
{
    const int num_elements = 20000;
    const int test_time = 5;        // seconds per round
    const int test_iterations = 5;
    // Integer division: when nthreads does not divide num_elements the
    // few leftover elements are simply unused that round.
    const int elem_per_thread = num_elements / nthreads;
    long long ops = 0;
    for(int it = 0; it < test_iterations; it++)
    {
        Stack st;
        // RAII storage (was raw new[]/delete[]): nothing leaks if
        // thread creation throws.  Also drops the unused timeval pair
        // the original declared.
        std::vector<Element> elements(num_elements);
        std::thread threads[MAX_THREADS];
        int numOps[MAX_THREADS] = {};
        for(int i = 0; i < nthreads; i++)
        {
            threads[i] = std::thread(Worker<Stack, Element>, std::ref(st),
                                     elements.data() + i*elem_per_thread,
                                     elem_per_thread, &numOps[i], i);
        }
        // Workers spin on `running`, so this store starts them all at
        // (approximately) the same instant.
        running.store(true, boost::memory_order_release);
        sleep(test_time);
        running.store(false, boost::memory_order_release);
        for(int i = 0; i < nthreads; i++)
        {
            threads[i].join();
            ops += numOps[i];
        }
    }
    return (double)ops / (test_time*test_iterations);
}
// Entry point: benchmarks both stack implementations at every thread
// count from 1 through MAX_THREADS and prints ops/sec side by side.
int main()
{
    for(int nthreads = 1; nthreads <= MAX_THREADS; nthreads++)
    {
        const double lockFreeOps = Test<LockFreeStack, LockFreeElement>(nthreads);
        const double lockedOps   = Test<LockedStack<LockedElement>, LockedElement>(nthreads);
        printf("%d threads, LockFree: %d/sec, Locked: %d/sec\n",
               nthreads, (int)lockFreeOps, (int)lockedOps);
    }
    return 0;
}

View File

@ -0,0 +1,23 @@
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

166
stack/boost/atomic.hpp Normal file
View File

@ -0,0 +1,166 @@
#ifndef BOOST_ATOMIC_HPP
#define BOOST_ATOMIC_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cstddef>
#include <cstdint>
#include "atomic/detail/base.hpp"
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#include "atomic/platform.hpp"
#endif
#include "atomic/detail/type-classifier.hpp"
namespace boost {
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
#define BOOST_ATOMIC_CHAR_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
#define BOOST_ATOMIC_INT_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_ADDRESS_LOCK_FREE
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 0
// Fallback when no platform header supplied a real fence: a no-op.
// NOTE(review): non-inline function defined in a header -- relies on
// this vendored header being included from a single TU; confirm.
void
atomic_thread_fence(memory_order)
{
}
#endif
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 0
// Fallback signal fence: delegates to the (at least as strong) thread
// fence above.
void
atomic_signal_fence(memory_order order)
{
atomic_thread_fence(order);
}
#endif
// Public front-end: dispatches to a base_atomic implementation chosen
// by the operand's category (type_classifier) and its size.
template<typename T>
class atomic : public detail::atomic::base_atomic<T, typename detail::atomic::type_classifier<T>::test, sizeof(T)> {
private:
typedef T value_type;
typedef detail::atomic::base_atomic<T, typename detail::atomic::type_classifier<T>::test, sizeof(T)> super;
public:
atomic(void) : super() {}
explicit atomic(const value_type & v) : super(v) {}
// Plain assignment is a seq_cst store performed by the base class.
atomic & operator=(value_type v) volatile
{
super::operator=(v);
return *const_cast<atomic *>(this);
}
private:
// Non-copyable (pre-C++11 idiom: declared but never defined).
atomic(const atomic &) /* =delete */ ;
atomic & operator=(const atomic &) /* =delete */ ;
};
typedef atomic<char> atomic_char;
typedef atomic<unsigned char> atomic_uchar;
typedef atomic<signed char> atomic_schar;
typedef atomic<uint8_t> atomic_uint8_t;
typedef atomic<int8_t> atomic_int8_t;
typedef atomic<unsigned short> atomic_ushort;
typedef atomic<short> atomic_short;
typedef atomic<uint16_t> atomic_uint16_t;
typedef atomic<int16_t> atomic_int16_t;
typedef atomic<unsigned int> atomic_uint;
typedef atomic<int> atomic_int;
typedef atomic<uint32_t> atomic_uint32_t;
typedef atomic<int32_t> atomic_int32_t;
typedef atomic<unsigned long> atomic_ulong;
typedef atomic<long> atomic_long;
typedef atomic<uint64_t> atomic_uint64_t;
typedef atomic<int64_t> atomic_int64_t;
#ifdef BOOST_HAS_LONG_LONG
typedef atomic<unsigned long long> atomic_ullong;
typedef atomic<long long> atomic_llong;
#endif
typedef atomic<void*> atomic_address;
typedef atomic<bool> atomic_bool;
// Minimal std::atomic_flag work-alike built on atomic<bool>.
class atomic_flag {
public:
atomic_flag(void) : v_(false) {}
// Sets the flag and returns its previous value.
bool
test_and_set(memory_order order = memory_order_seq_cst)
{
return v_.exchange(true, order);
}
// Clears the flag.
// NOTE(review): clear() is volatile-qualified but test_and_set() is
// not -- asymmetry preserved as found; confirm against upstream boost
// if flags are ever used through volatile references.
void
clear(memory_order order = memory_order_seq_cst) volatile
{
v_.store(false, order);
}
private:
// Non-copyable (declared, never defined).
atomic_flag(const atomic_flag &) /* = delete */ ;
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
atomic<bool> v_;
};
typedef atomic<char> atomic_char;
typedef atomic<unsigned char> atomic_uchar;
typedef atomic<signed char> atomic_schar;
typedef atomic<uint8_t> atomic_uint8_t;
typedef atomic<int8_t> atomic_int8_t;
typedef atomic<unsigned short> atomic_ushort;
typedef atomic<short> atomic_short;
typedef atomic<uint16_t> atomic_uint16_t;
typedef atomic<int16_t> atomic_int16_t;
typedef atomic<unsigned int> atomic_uint;
typedef atomic<int> atomic_int;
typedef atomic<uint32_t> atomic_uint32_t;
typedef atomic<int32_t> atomic_int32_t;
typedef atomic<unsigned long> atomic_ulong;
typedef atomic<long> atomic_long;
typedef atomic<uint64_t> atomic_uint64_t;
typedef atomic<int64_t> atomic_int64_t;
typedef atomic<unsigned long long> atomic_ullong;
typedef atomic<long long> atomic_llong;
typedef atomic<void*> atomic_address;
typedef atomic<bool> atomic_bool;
}
#endif

View File

@ -0,0 +1,163 @@
#ifndef BOOST_DETAIL_ATOMIC_BASE_HPP
#define BOOST_DETAIL_ATOMIC_BASE_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Base class definition and fallback implementation.
// To be overridden (through partial specialization) by
// platform implementations.
#include <string.h>
#include "../../memory_order.hpp"
#ifndef DISABLE_ATOMIC_OPERATORS
#define BOOST_ATOMIC_DECLARE_ASSIGNMENT_OPERATORS \
operator value_type(void) volatile const \
{ \
return load(memory_order_seq_cst); \
} \
\
this_type & \
operator=(value_type v) volatile \
{ \
store(v, memory_order_seq_cst); \
return *const_cast<this_type *>(this); \
}
#else
// locked out
//
#define BOOST_ATOMIC_DECLARE_ASSIGNMENT_OPERATORS
#endif
#define BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ASSIGNMENT_OPERATORS \
\
bool \
compare_exchange_strong( \
value_type & expected, \
value_type desired, \
memory_order order = memory_order_seq_cst) volatile \
{ \
return compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); \
} \
\
bool \
compare_exchange_weak( \
value_type & expected, \
value_type desired, \
memory_order order = memory_order_seq_cst) volatile \
{ \
return compare_exchange_weak(expected, desired, order, calculate_failure_order(order)); \
} \
\
#ifndef DISABLE_ATOMIC_OPERATORS
#define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
value_type \
operator++(int) volatile \
{ \
return fetch_add(1); \
} \
\
value_type \
operator++(void) volatile \
{ \
return fetch_add(1) + 1; \
} \
\
value_type \
operator--(int) volatile \
{ \
return fetch_sub(1); \
} \
\
value_type \
operator--(void) volatile \
{ \
return fetch_sub(1) - 1; \
} \
\
value_type \
operator+=(difference_type v) volatile \
{ \
return fetch_add(v) + v; \
} \
\
value_type \
operator-=(difference_type v) volatile \
{ \
return fetch_sub(v) - v; \
} \
#define BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
value_type \
operator&=(difference_type v) volatile \
{ \
return fetch_and(v) & v; \
} \
\
value_type \
operator|=(difference_type v) volatile \
{ \
return fetch_or(v) | v; \
} \
\
value_type \
operator^=(difference_type v) volatile \
{ \
return fetch_xor(v) ^ v; \
} \
#else
// locked out
//
#define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS
#define BOOST_ATOMIC_DECLARE_BIT_OPERATORS
#endif
#define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
#define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
namespace boost {
namespace detail {
namespace atomic {
// Derives the memory order applied when a compare-exchange fails from
// the requested success order.  A failed CAS performs no store, so any
// release component is dropped (acq_rel -> acquire, release ->
// relaxed); every other order is already a pure load constraint and
// passes through unchanged.
static inline memory_order
calculate_failure_order(memory_order order)
{
    if (order == memory_order_acq_rel)
        return memory_order_acquire;
    if (order == memory_order_release)
        return memory_order_relaxed;
    return order;
}
template<typename T, typename C , unsigned int Size>
class base_atomic;
}
}
}
#endif

View File

@ -0,0 +1,434 @@
#ifndef BOOST_DETAIL_ATOMIC_CAS64STRONG_HPP
#define BOOST_DETAIL_ATOMIC_CAS64STRONG_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Build 64-bit atomic operation from platform_cmpxchg64_strong
// primitive. It is assumed that 64-bit loads/stores are not
// atomic, so they are funnelled through cmpxchg as well.
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/base.hpp>
namespace boost {
namespace detail {
namespace atomic {
/* integral types */
// 64-bit integral specialization: every operation -- including plain
// load and store -- is funnelled through platform_cmpxchg64_strong,
// because this header assumes 64-bit loads/stores are not atomic.
// NOTE(review): this specialization names four template parameters
// (..., Sign) while detail/base.hpp in this tree forward-declares
// base_atomic<T, C, Size> with three; presumably this header is not
// selected on x86 builds (platform.hpp picks gcc-x86.hpp) -- confirm.
template<typename T, bool Sign>
class base_atomic<T, int, 8, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
// Store = CAS loop.  The initial non-atomic read of v_ is only a
// first guess; a failing CAS refreshes `expected` for the retry.
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type expected = v_;
do {
} while (!compare_exchange_strong(expected, v, order, memory_order_relaxed));
}
// Load = CAS(v, v): writes back the value it read, making the 8-byte
// read atomic without an atomic load instruction.
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
do {
} while (!const_cast<base_atomic *>(this)->compare_exchange_strong(v, v, order, memory_order_relaxed));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
// "Weak" simply delegates to the strong version on this platform.
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
// Brackets the platform primitive with the fences implied by the
// requested success/failure orders.
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
// The read-modify-write operations below all follow the same
// load + CAS-retry pattern and return the previous value.
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* pointer types */
// void* specialization: same CAS-loop structure as the integral
// specialization above, restricted to add/sub arithmetic.
// NOTE(review): fetch_add/fetch_sub apply +/- to a void* -- a GCC
// extension that treats void* arithmetic as byte offsets; confirm
// this is the intended semantics.
template<bool Sign>
class base_atomic<void *, void *, 8, Sign> {
typedef base_atomic this_type;
typedef void * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
// Store via CAS loop; the initial read of v_ is only a first guess.
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type expected = v_;
do {
} while (!compare_exchange_strong(expected, v, order, memory_order_relaxed));
}
// Atomic load implemented as CAS(v, v).
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
do {
} while (!const_cast<base_atomic *>(this)->compare_exchange_strong(v, v, order, memory_order_relaxed));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
// "Weak" simply delegates to the strong version on this platform.
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
// Platform CAS bracketed by the fences the requested orders imply.
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
// T* specialization: identical structure to the void* specialization
// above, but fetch_add/fetch_sub advance by whole objects (ordinary
// pointer arithmetic, scaled by sizeof(T)).
template<typename T, bool Sign>
class base_atomic<T *, void *, 8, Sign> {
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
// Store via CAS loop; the initial read of v_ is only a first guess.
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type expected = v_;
do {
} while (!compare_exchange_strong(expected, v, order, memory_order_relaxed));
}
// Atomic load implemented as CAS(v, v).
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
do {
} while (!const_cast<base_atomic *>(this)->compare_exchange_strong(v, v, order, memory_order_relaxed));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
// "Weak" simply delegates to the strong version on this platform.
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
// Platform CAS bracketed by the fences the requested orders imply.
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* generic types */
// Generic specialization for arbitrary 8-byte payloads: the value is
// bit-copied (memcpy) into a uint64_t and CASed as an integer.  On a
// failed compare_exchange_strong the observed storage is copied back
// into `expected`, matching the standard CAS contract.
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint64_t storage_type;
public:
// v_ is zeroed first so padding bytes beyond sizeof(value_type)
// compare equal across instances.
explicit base_atomic(value_type v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) : v_(0) {}
// Store via CAS loop; the initial non-atomic snapshot of v_ is only
// a first guess, refreshed on each failed CAS.
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type expected;
memcpy(&expected, const_cast<storage_type *>(&v_), sizeof(value_type));
do {
} while (!compare_exchange_strong(expected, v, order, memory_order_relaxed));
}
// Atomic load implemented as CAS(v, v).
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
memcpy(&v, const_cast<storage_type *>(&v_), sizeof(value_type));
do {
} while (!const_cast<base_atomic *>(this)->compare_exchange_strong(v, v, order, memory_order_relaxed));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
// "Weak" simply delegates to the strong version on this platform.
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
// Propagate the value actually observed back to the caller.
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
}
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,79 @@
#ifndef BOOST_DETAIL_ATOMIC_TYPE_CLASSIFIER_HPP
#define BOOST_DETAIL_ATOMIC_TYPE_CLASSIFIER_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
namespace boost { namespace detail { namespace atomic {
template<typename T>
struct type_classifier {
typedef void test;
};
template<>
struct type_classifier<char> {typedef int test;};
template<>
struct type_classifier<unsigned char> {typedef int test;};
template<>
struct type_classifier<signed char> {typedef int test;};
template<>
struct type_classifier<unsigned short> {typedef int test;};
template<>
struct type_classifier<signed short> {typedef int test;};
template<>
struct type_classifier<unsigned int> {typedef int test;};
template<>
struct type_classifier<signed int> {typedef int test;};
template<>
struct type_classifier<unsigned long> {typedef int test;};
template<>
struct type_classifier<long> {typedef int test;};
#ifdef BOOST_HAS_LONG_LONG
template<> struct type_classifier<unsigned long long>
{typedef int test;};
template<> struct type_classifier<signed long long>
{typedef int test;};
#endif
template<typename T>
struct type_classifier<T *> {typedef void * test;};
template<typename T>
struct sign_trait {
typedef void test;
};
template<>
struct sign_trait<char> {typedef int test;};
template<>
struct sign_trait<unsigned char> {typedef unsigned int test;};
template<>
struct sign_trait<signed char> {typedef int test;};
template<>
struct sign_trait<unsigned short> {typedef unsigned int test;};
template<>
struct sign_trait<signed short> {typedef int test;};
template<>
struct sign_trait<unsigned int> {typedef unsigned int test;};
template<>
struct sign_trait<signed int> {typedef int test;};
template<>
struct sign_trait<unsigned long> {typedef unsigned int test;};
template<>
struct sign_trait<long> {typedef int test;};
#ifdef BOOST_HAS_LONG_LONG
template<> struct sign_trait<unsigned long long>
{typedef unsigned int test;};
template<> struct sign_trait<signed long long>
{typedef int test;};
#endif
}}}
#endif

View File

@ -0,0 +1,43 @@
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Platform selection file
//#include <boost/config.hpp>
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#include "detail/gcc-x86.hpp"
#elif defined(__GNUC__) && defined(__alpha__)
#include <boost/atomic/detail/gcc-alpha.hpp>
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
#include <boost/atomic/detail/gcc-ppc.hpp>
// This list of ARM architecture versions comes from Apple's arm/arch.h header.
// I don't know how complete it is.
#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_7A__))
#include <boost/atomic/detail/gcc-armv6+.hpp>
#elif defined(__linux__) && defined(__arm__)
#include <boost/atomic/detail/linux-arm.hpp>
#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
#include <boost/atomic/detail/interlocked.hpp>
#elif defined(__GNUC__)
#include <boost/atomic/detail/gcc-cas.hpp>
#endif

View File

@ -0,0 +1,53 @@
#ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED
#define BOOST_MEMORY_ORDER_HPP_INCLUDED
// MS compatible compilers support #pragma once
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
// boost/memory_order.hpp
//
// Defines enum boost::memory_order per the C++0x working draft
//
// Copyright (c) 2008, 2009 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
namespace boost
{
//
// Enum values are chosen so that code that needs to insert
// a trailing fence for acquire semantics can use a single
// test such as:
//
// if( mo & memory_order_acquire ) { ...fence... }
//
// For leading fences one can use:
//
// if( mo & memory_order_release ) { ...fence... }
//
// Architectures such as Alpha that need a fence on consume
// can use:
//
// if( mo & ( memory_order_acquire | memory_order_consume ) ) { ...fence... }
//
enum memory_order
{
memory_order_relaxed = 0,
memory_order_acquire = 1,
memory_order_release = 2,
memory_order_acq_rel = 3, // acquire | release
memory_order_seq_cst = 7, // acq_rel | 4
memory_order_consume = 8
};
} // namespace boost
#endif // #ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED

33
stack/locked.h Normal file
View File

@ -0,0 +1,33 @@
#include <mutex>
#include <stack>
// Mutex-protected LIFO stack of raw (non-owning) pointers; the
// baseline the lock-free stack is benchmarked against.  Thread-safe:
// every operation holds m_mutex for its full duration.
template<class T>
class LockedStack
{
public:
    // Pushes entry onto the stack.
    void Push(T* entry)
    {
        std::lock_guard<std::mutex> guard(m_mutex);
        m_stack.push(entry);
    }
    // Pops the most recently pushed element, or returns nullptr when
    // the stack is empty.  The int parameter is ignored; it exists only
    // for compatibility with LockFreeStack's Pop(threadId) signature.
    T* Pop(int)
    {
        std::lock_guard<std::mutex> guard(m_mutex);
        T* top = nullptr;
        if(!m_stack.empty())
        {
            top = m_stack.top();
            m_stack.pop();
        }
        return top;
    }
private:
    std::stack<T*> m_stack;
    std::mutex m_mutex;
};

112
stack/lockfree.h Normal file
View File

@ -0,0 +1,112 @@
#include "atomic.h"
// A Treiber-style lock-free LIFO stack of intrusive nodes.
// - ABA protection: the head pointer is paired with a monotonically
//   increasing counter, and the pair is replaced in one shot with a
//   double-width (16-byte) compare-and-swap.
// - m_hazard publishes the node a popping thread is about to
//   dereference.  In this benchmark nodes are never freed, so the slot
//   mainly enforces the re-check ordering in TryPopStack.
//   NOTE(review): hazard slots are never cleared after a successful
//   pop -- confirm before combining this code with real memory
//   reclamation.
// Requires MAX_THREADS to be #defined by the includer (benchmark.cpp
// defines it before including this header).
class LockFreeStack
{
public:
// The elements we wish to store should inherit Node
//
struct Node
{
boost::atomic<Node*> next;
};
// Unfortunately, there is no platform independent way to
// define this class. The following definition works in
// gcc on x86_64 architectures
//
// A (node, counter) pair that is read piecewise but replaced
// atomically via lock cmpxchg16b.
class TaggedPointer
{
public:
TaggedPointer(): m_node(nullptr), m_counter(0) {}
Node* GetNode()
{
return m_node.load(boost::memory_order_acquire);
}
uint64_t GetCounter()
{
return m_counter.load(boost::memory_order_acquire);
}
// Atomically replaces (m_node, m_counter) with (newNode, newCounter)
// iff they still equal (oldNode, oldCounter); returns true on
// success.  cmpxchg16b requires the 16-byte alignment declared on
// this class below.
bool CompareAndSwap(Node* oldNode, uint64_t oldCounter, Node* newNode, uint64_t newCounter)
{
bool cas_result;
__asm__ __volatile__
(
"lock cmpxchg16b %0;" // cmpxchg16b sets ZF on success
"setz %3;" // if ZF set, set cas_result to 1
: "+m" (*this), "+a" (oldNode), "+d" (oldCounter), "=q" (cas_result)
: "b" (newNode), "c" (newCounter)
: "cc", "memory"
);
return cas_result;
}
private:
boost::atomic<Node*> m_node;
boost::atomic<uint64_t> m_counter;
}
// 16-byte alignment is required for double-width
// compare and swap
//
__attribute__((aligned(16)));
// Single push attempt: link entry in front of the current head and
// try to swing the head to it, bumping the version counter.  Returns
// false when another thread changed the head first.
bool TryPushStack(Node* entry)
{
Node* oldHead;
uint64_t oldCounter;
oldHead = m_head.GetNode();
oldCounter = m_head.GetCounter();
entry->next.store(oldHead, boost::memory_order_relaxed);
return m_head.CompareAndSwap(oldHead, oldCounter, entry, oldCounter + 1);
}
// Single pop attempt.  Returns true when finished (oldHead is the
// popped node, or nullptr for an empty stack); false means "retry".
bool TryPopStack(Node*& oldHead, int threadId)
{
oldHead = m_head.GetNode();
uint64_t oldCounter = m_head.GetCounter();
if(oldHead == nullptr)
{
return true;
}
// Publish the node we are about to dereference, then re-check that
// it is still the head (the seq_cst store keeps the publish ordered
// before the re-read).  Slots are spaced 8 pointers apart,
// presumably so each thread's slot sits on its own cache line --
// confirm.
m_hazard[threadId*8].store(oldHead, boost::memory_order_seq_cst);
if(m_head.GetNode() != oldHead)
{
return false;
}
return m_head.CompareAndSwap(oldHead, oldCounter, oldHead->next.load(boost::memory_order_acquire), oldCounter + 1);
}
// Pushes entry, retrying with a short sleep as backoff until the CAS
// succeeds.
void Push(Node* entry)
{
while(true)
{
if(TryPushStack(entry))
{
return;
}
usleep(250);
}
}
// Pops one node, or returns nullptr if the stack was observed empty;
// retries with backoff on contention.
Node* Pop(int threadId)
{
Node* res;
while(true)
{
if(TryPopStack(res, threadId))
{
return res;
}
usleep(250);
}
}
private:
TaggedPointer m_head;
boost::atomic<Node*> m_hazard[MAX_THREADS*8];
};