15 #include "../../sequential/sequential/sequential.hpp" 29 const std::set<nat_type> &previous_bad_table,
31 unsigned int range_size) {
32 assert(nb_thread >= 1);
35 assert(first_n <= last_n);
42 const nat_type bad_first_n = (previous_bad_table.empty()
46 std::set<nat_type> bad_table(previous_bad_table);
48 std::set<nat_type>* new_bad_tables =
new std::set<nat_type>[nb_thread]();
49 std::thread*
threads =
new std::thread[nb_thread];
51 for (
nat_type n = first_n; n <= last_n; ) {
56 unsigned int nb_slave_required = 0;
60 for (
unsigned int i = 1; (i < nb_thread) && (n <= last_n); ++i, n += range_size) {
62 threads[i] = std::thread
63 ([bad_first_n, bad_last_n, i, last_n, n, range_size,
64 &bad_table, &new_bad_tables] {
67 (n, std::min(n + range_size - 1, last_n),
69 bad_first_n, bad_last_n,
77 (master_n, std::min(master_n + range_size - 1, last_n),
79 bad_first_n, bad_last_n,
83 for (
unsigned int i = 1; i <= nb_slave_required; ++i) {
88 for (
unsigned int i = 0; i < nb_thread; ++i) {
89 if (!new_bad_tables[i].empty()) {
90 bad_table.insert(new_bad_tables[i].cbegin(), new_bad_tables[i].cend());
94 new_bad_tables[i].clear();
99 delete[] new_bad_tables;
109 const std::set<nat_type> &previous_bad_table,
111 unsigned int range_size,
112 unsigned int master_range_size) {
113 assert(nb_thread >= 1);
115 assert(3 <= first_n);
116 assert(first_n <= last_n);
124 const nat_type bad_first_n = (previous_bad_table.empty()
128 std::set<nat_type> bad_table(previous_bad_table);
131 std::set<nat_type>* new_bad_tables =
new std::set<nat_type>[nb_thread]();
133 std::set<nat_type>* current_bad_tables =
new std::set<nat_type>[nb_thread]();
134 bool* is_finisheds =
new bool[nb_thread];
135 std::thread*
threads =
new std::thread[nb_thread];
138 for (
unsigned int i = 1; i < nb_thread; ++i) {
139 threads[i] = std::thread([] {});
140 is_finisheds[i] =
true;
141 first_ns[i] = first_n;
144 for (
nat_type n = first_n; n <= last_n; ) {
151 n += master_range_size;
153 for (
unsigned int i = 1; (i < nb_thread) && (n <= last_n); ++i) {
154 if (is_finisheds[i]) {
155 is_finisheds[i] =
false;
162 bad_table.insert(new_bad_tables[i].cbegin(), new_bad_tables[i].cend());
166 current_bad_tables[i] = bad_table;
170 threads[i] = std::thread
171 ([bad_first_n, bad_last_n, i, range_size, last_n, n,
172 ¤t_bad_tables, &first_ns, &is_finisheds, &new_bad_tables] {
176 std::min(n + range_size - 1, last_n),
177 current_bad_tables[i],
178 bad_first_n, bad_last_n,
180 current_bad_tables[i].clear();
181 is_finisheds[i] =
true;
193 std::min(first_ns[0] + master_range_size - 1, last_n),
195 bad_first_n, bad_last_n,
199 bad_table.insert(new_bad_tables[0].cbegin(), new_bad_tables[0].cend());
203 new_bad_tables[0].clear();
207 for (
unsigned int i = 1; i < nb_thread; ++i) {
211 bad_table.insert(new_bad_tables[i].cbegin(), new_bad_tables[i].cend());
218 delete[] is_finisheds;
219 delete[] new_bad_tables;
229 const std::set<nat_type> &previous_bad_table,
231 assert(nb_thread >= 1);
233 assert(3 <= first_n);
234 assert(first_n <= last_n);
240 const nat_type bad_first_n = (previous_bad_table.empty()
244 std::set<nat_type> bad_table(previous_bad_table);
246 bool* is_lowers =
new bool[nb_thread];
248 std::thread*
threads =
new std::thread[nb_thread];
250 for (
nat_type n = first_n; n <= last_n; ) {
251 unsigned int nb_thread_required = 0;
255 for ( ; (nb_thread_required < nb_thread) && (n <= last_n); n += 2) {
257 ns[nb_thread_required] = n;
259 if (nb_thread_required > 0) {
260 threads[nb_thread_required] = std::thread
261 ([bad_first_n, nb_thread_required, is_lowers, ns,
263 is_lowers[nb_thread_required]
266 bad_first_n, ns[0] - 1);
270 ++nb_thread_required;
278 bad_first_n, ns[0] - 1);
281 for (
unsigned int i = 1; i < nb_thread_required; ++i) {
286 for (
unsigned int i = 0; i < nb_thread_required; ++i) {
288 bad_table.insert(ns[i]);
290 std::cout << ns[i] << std::endl;
std::vector< nat_type > sequential_print_in_order(const std::set< nat_type > &ns)
Print numbers from ns in increasing order and return a list of these numbers in the same order...
std::set< nat_type > sequential_check_gentle_varsigma_odd(nat_type first_n, nat_type last_n, bool print_bad)
Check, in order, all odd gentle numbers between first_n and last_n, and if print_bad then print all...
constexpr bool is_odd(nat_type n)
Return true iff n is odd.
constexpr bool is_even(nat_type n)
Return true iff n is even.
bool sequential_is_varsigma_odd_lower(nat_type n, const std::set< nat_type > &bad_table, nat_type bad_first_n)
Return true iff varsigma_odd(n) < n.
std::set< nat_type > threads_check_gentle_varsigma_odd__dynamic(unsigned int nb_thread, nat_type first_n, nat_type last_n, const std::set< nat_type > &previous_bad_table, bool print_bad, unsigned int range_size, unsigned int master_range_size)
Check, in order, all odd gentle numbers between first_n and last_n, and if print_bad then print all...
Implementation of the threads parallel algorithms presented in the report.
constexpr nat_type MAX_POSSIBLE_N
Lower bound of the largest number such that it is possible to compute the result of the sigma function...
std::set< nat_type > threads_check_gentle_varsigma_odd__by_range(unsigned int nb_thread, nat_type first_n, nat_type last_n, const std::set< nat_type > &previous_bad_table, bool print_bad, unsigned int range_size)
constexpr nat_type sequential_min_array(const nat_type ns[], size_t size)
Return the minimum of the first size values of ns.
sigmaodd::nat_type nat_type
std::set< nat_type > threads_check_gentle_varsigma_odd__one_by_one(unsigned int nb_thread, nat_type first_n, nat_type last_n, const std::set< nat_type > &previous_bad_table, bool print_bad)
constexpr bool is_first_mersenne_prime_unitary_divide_or_square(nat_type n)
Return true iff is_first_mersenne_prime_unitary_divide(n) or is_square(n).