Take every k-th element from a series of natural numbers (1 .. n)

For example, given the sequence 1, 2, 3, 4, 5, taking every 3rd element yields 3, 1, 5, 2, 4 (each selected element is removed from the sequence, and counting continues over the remaining elements until none are left). A naive implementation with a circular doubly linked list is not a good idea. Can you advise which data structures and algorithms are more applicable?
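For concreteness, here is a brute-force sketch of the process being described (not part of the original question; the function name is made up). It simply simulates the selection on an array, which is quadratic in the worst case and is exactly the kind of naive approach the question wants to avoid:

 function takeEveryKth(n, k) {
   var items = [], order = [], pos = 0;
   for (var i = 1; i <= n; i++) items.push(i);
   while (items.length) {
     pos = (pos + k - 1) % items.length;    // index of the next element to take
     order.push(items.splice(pos, 1)[0]);   // remove it; counting resumes at this index
   }
   return order;
 }

 console.log(takeEveryKth(5, 3).join(',')); // 3,1,5,2,4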

+7
algorithm data-structures search josephus
4 answers

Create a complete binary tree containing the numbers 1 to n; for example, for n = 15 it would look like this:

(image: the complete binary tree for n = 15)

In each node, store the number of nodes to its left; this will allow us to quickly find the i-th node. (You will see that this tree has a very predictable structure and values, and generating it is much more efficient than creating a binary tree of the same size with arbitrary values. It is also an ideal candidate for a tree-in-an-array representation.)

Then, to find the i-th number, start at the root, and at every node: if i is exactly one more than the number of nodes to its left, you have found the i-th number; otherwise go left (if i is not greater than the number of nodes to the left) or right (if i is more than one greater than the number of nodes to the left).

Whenever you go left, decrease that node's left-node count by one (because we are going to delete a node in its left subtree).

Whenever you go right, decrease the number you are looking for by the node's left-node count, plus 1 (or plus 0 if the node's value has been erased).

When you find the i-th node, read its value (to add it to the removal-order list) and then set its value to 0. Afterwards, if the i-th node we are looking for turns out to be one whose value has been erased, we go right and then take the leftmost node.

We start with i = k, and every time we erase the number in the i-th node, we decrement the total number of nodes and set i = (i + k - 1) % total (or i = total if that is zero).

This gives a search time of log2(N) per step and an overall complexity of O(N log N).


An example run: with n = 15 (as in the image above) and k = 6, the first numbers removed are 6, 12, 3, 10, 2. At that point the situation looks like this:

(image: the tree after step 5, in which number 2 is removed)

We have just removed the number 2, and now i = 2 + 6 - 1 = 7. We start at the root node, which has 4 nodes to its left and still has its value, so we go right and subtract 5 from the 7 we are looking for, getting 2. We arrive at node 12 (whose value has been erased) and find that it has 2 nodes to its left, so we decrement its left-node count and go left. We come to node 10 (also erased) and find that it has 1 node to its left, and 1 = 2 - 1, so this is the node we are looking for; however, since its value has been erased, we go right and subtract 1 from the 2 we are looking for, getting 1. We arrive at node 11, which has 0 nodes to its left (it is a leaf), and 0 = 1 - 1, so this is the node we are looking for.

(image: the tree after step 6, in which number 11 is removed)

We then reduce the total number of nodes from 10 to 9, update i from 7 to (7 + 6 - 1) % 9 = 3, and continue searching for the third node (which now holds the value 5).


Here is a simple implementation in JavaScript. It processes a sequence of 100,000 numbers in less than a second, and it could probably be made faster and more memory-efficient by storing the tree in an array.

(Unlike in the explanation above, the indexes of the numbers are zero-based to simplify the code, so index 0 is the first number in the tree, and we are looking for the node whose left-node count equals the target index.)

 function Tree(size) {                                      // CONSTRUCTOR
     var height = Math.floor(Math.log(size) / Math.log(2));
     this.root = addNode(height, 1 << height, size);
     this.size = size;

     function addNode(height, value, max) {                 // RECURSIVE TREE-BUILDER
         var node = {value: value > max ? 0 : value, lower: (1 << height) - 1};
         if (height--) {
             node.left = addNode(height, value - (1 << height), max);
             if (value < max) {                             // DON'T ADD UNNECESSARY RIGHT NODES
                 node.right = addNode(height, value + (1 << height), max);
             }
         }
         return node;
     }
 }

 Tree.prototype.cut = function(step) {                      // SEE ANSWER FOR DETAILS
     var sequence = [], index = (step - 1) % this.size;
     while (this.size) {
         var node = this.root, target = index;
         while (node.lower != target || node.value == 0) {
             if (target < node.lower) {
                 --node.lower;
                 node = node.left;
             } else {
                 target -= node.lower + (node.value ? 1 : 0);
                 node = node.right;
             }
         }
         sequence.push(node.value);
         node.value = 0;
         index = (index + step - 1) % --this.size;
     }
     return sequence;
 }

 var tree = new Tree(15);
 var sequence = tree.cut(6);
 document.write("15/6&rarr;" + sequence + "<BR>");
 tree = new Tree(100000);
 sequence = tree.cut(123456);
 document.write("100000/123456&rarr;" + sequence);

NOTE.

If you look at the tree for n = 10, you will see that the node to the right of the root has an incomplete tree with two nodes to its left, but the algorithm implemented in the code above gives it an incorrect left-node count of 3 instead of 2:

(image: the tree for n = 10)

However, nodes with an incomplete tree to their left never matter by themselves and never have nodes to their right, so you always go left at such a node, and the fact that its left-node count is too large makes no difference.

+5

If you only need the last remaining number, this is known as the Josephus problem, and there are well-known formulas for computing the answer in O(N) time.
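As an illustration (not part of the original answer), here is the standard O(N) recurrence for the position of the last survivor, J(1) = 0 and J(i) = (J(i-1) + k) % i, converted back to 1-based numbering:

 function lastRemaining(n, k) {
   var pos = 0;                      // survivor's 0-based index in a circle of size 1
   for (var i = 2; i <= n; i++) {
     pos = (pos + k) % i;            // grow the circle one element at a time
   }
   return pos + 1;                   // convert to 1-based numbering
 }

 console.log(lastRemaining(5, 3));   // 4 -- the last number in 3,1,5,2,4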

I don't know whether that can be adapted to produce the full removal order, so I'll describe a direct O(N log N) solution here:

Store all the numbers in a balanced binary search tree with implicit keys (for example, a treap). At each step we need to find the k-th element and delete it (strictly speaking there may be a shift, so it is more like (cur_shift + k) % cur_size, but that does not really matter). This can be done with split and merge: split the tree into 3 parts, [0, k - 1], [k, k] and [k + 1, cur_size - 1], output the number in the node corresponding to the middle part, and merge the first and last parts back together. This takes O(log N) time per step, which should be good enough for these constraints.
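A minimal sketch of this split/merge approach, assuming a randomized treap with implicit keys; the node layout and the names node, split, merge and josephusOrder are illustrative, not taken from the answer:

 // Node with a random heap priority; `size` is the subtree size (the implicit key).
 function node(value) {
   return { value: value, prio: Math.random(), size: 1, left: null, right: null };
 }
 function size(t) { return t ? t.size : 0; }
 function update(t) { if (t) t.size = 1 + size(t.left) + size(t.right); return t; }

 // Merge treaps a and b, where every element of a precedes every element of b.
 function merge(a, b) {
   if (!a) return b;
   if (!b) return a;
   if (a.prio > b.prio) { a.right = merge(a.right, b); return update(a); }
   b.left = merge(a, b.left); return update(b);
 }

 // Split t into [the first `count` elements, the rest].
 function split(t, count) {
   if (!t) return [null, null];
   if (size(t.left) >= count) {
     var parts = split(t.left, count);
     t.left = parts[1];
     return [parts[0], update(t)];
   }
   var parts2 = split(t.right, count - size(t.left) - 1);
   t.right = parts2[0];
   return [update(t), parts2[1]];
 }

 function josephusOrder(n, k) {
   var root = null;
   for (var i = 1; i <= n; i++) root = merge(root, node(i));
   var order = [], pos = 0;
   while (size(root) > 0) {
     pos = (pos + k - 1) % size(root);  // index of the element to remove
     var a = split(root, pos);          // [0 .. pos-1] and [pos .. end]
     var b = split(a[1], 1);            // [pos] and [pos+1 .. end]
     order.push(b[0].value);
     root = merge(a[0], b[1]);          // counting resumes at index `pos`
   }
   return order;
 }

 console.log(josephusOrder(5, 3).join(','));        // 3,1,5,2,4
 console.log(josephusOrder(100000, 123456).length); // full permutation of 1..100000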

+6

Here is an array-based binary tree implementation that stores only the left-subtree sizes as node values. The input array itself is not stored; its values are implicitly assumed to be the leaves at the level just below the bottom of the tree:

 function josephusPermutation(size, step) {
     var len = 1 << 32 - Math.clz32(size - 1),   // Smallest power of 2 >= size
         tree = Array(len).fill(0),              // Create tree in array representation
         current = 0,
         skip = step - 1,
         result = Array(size).fill(0),
         goRight, leftSize, order, i, j;

     // Initialise tree with sizes of left subtrees as node values
     (function init(i) {
         if (i >= len) return +(i - len < size); // Only count when within size
         var left = tree[i] = init(i*2);         // recursive, only store left-size
         return left + (left ? init(i*2+1) : 0); // return sum of left and right
     })(1);

     for (j = 0; j < result.length; j++, size--) {
         current = (current + skip) % size;      // keep within range
         order = current;
         for (i = 1; i < len; i = i*2 + goRight) {
             leftSize = tree[i];
             goRight = order >= leftSize;
             if (goRight) {
                 order -= leftSize;              // Moving rightward, counting what is at left side.
             } else {
                 tree[i]--;                      // we will remove value at left side
             }
         }
         result[j] = 1 + i - len;
     }
     return result;
 }

 var sequence = josephusPermutation(100000, 123456);
 console.log(sequence.join(','));
+2

The following is an implementation of the O(n log k) algorithm of Lei Wang and Xiaodong Wang (2013) (1) (very similar to, if not based on, Errol Lloyd's algorithm published in 1983). The idea is to divide the original sequence into n/m binary trees of height log k. The algorithm is actually designed for the "Feline" Josephus problem, where participants can have more than one life (stored in the array variable global.l below).

I also like the O(1)-space algorithms of Knuth, Arens, and Kaplansky (outlined in a master's thesis by Gregory Wilson, California State University, Hayward, 1979) (2), which take longer to run, although they can be quite fast depending on the parameters.

Knuth's algorithm for J(n,d,t) (t is the t-th hit), a descending sequence:

 Let x_1 = d * t, and for k = 2, 3, ... let
     x_k = ⌊(d * x_(k−1) − d * n − 1) / (d − 1)⌋
 Then J(n,d,t) = x_p, where x_p is the first term in the sequence that is <= n.

Arens's algorithm for J(n,d,t), an ascending sequence:

 Let a_1 = 1, and for k = 2, 3, ... let
     a_k = ⌈(n − t + a_(k−1)) * d / (d − 1)⌉
 If a_r is the first term in the sequence such that a_r + 1 ≥ d * t + 1, then J(n,d,t) = d * t + 1 − a_r.

Kaplansky's algorithm for J(n,d,t):

 Let Z+ be the set of positive integers, and for k = 1, 2, ..., t define a mapping P_k : Z+ → Z+ by
     P_k(m) = (m + d − 1) − (n − k + 1) ⌊(m − k + d − 1) / (n − k + 1)⌋
 Then J(n,d,t) = P_1 ◦ P_2 ◦ ··· ◦ P_t(t).

JavaScript Code:

 var global = {              // Parameters and shared state
   n: 100000,                // number of participants
   k: 123456,                // every k-th participant is hit
   l: new Array(5).fill(1),  // lives for the "Feline" variant; entries default to a single life here
   m: null,                  // tree width: smallest power of 2 >= k
   b: null,                  // number of trees: ceil(n / m)
   a: [],                    // the trees, in array (heap) representation
   next: [],                 // circular linked list of trees
   prev: [],
   i: 0,                     // number of hits processed so far
   limit: 5,                 // number of hits to report in this demo
   r: null,                  // running count used while locating the k-th participant
   t: null                   // index of the current tree
 }

 // Split the sequence 1..n into b trees of m leaves each, linked in a circle
 function init(params){
   global.m = Math.pow(2, Math.ceil(Math.log2(params.k)));
   params.b = Math.ceil(params.n / global.m);

   for (let i=0; i<params.b; i++){
     let s = i * global.m,
         t = (i + 1) * global.m,
         u = [];

     for (let j=0; j<global.m; j++)
       u[j] = 0;

     for (let j=s; j<=Math.min(t-1,params.n-1); j++)
       u[j-s] = -(j + 1);    // participants are stored as negative leaf values

     global.a[i] = [];
     build(u, global.a[i]);

     t = (i + 1) % params.b;
     params.next[i] = t;
     params.prev[t] = i;
   }
 }

 // Place the values of u at the leaf level of v and store, in every internal
 // node, the number of remaining (negative) leaves in its subtree
 function build(u,v){
   function count(_v, i){
     if (global.m < i + 2){
       if (_v[i] < 0)
         return 1;
       else
         return 0;
     } else {
       _v[i] = count(_v, 2*i + 1);
       _v[i] = _v[i] + count(_v, 2*i + 2);
       return _v[i];
     }
   }

   for (let i=0; i<global.m; i++)
     v[global.m + i - 1] = u[i];

   count(v, 0);
 }

 // Main loop: locate and process one hit per iteration
 function algorithmL(n, b){
   global.r = 0;
   global.t = b - 1;

   while (global.i < global.limit){
     tree(global, global);
     let j = leaf(global, global);
     hit(global.i,j,global,global);
     global.i = global.i + 1;
   }
 }

 // Walk the circular list of trees until the tree containing the k-th
 // remaining participant is reached (r accumulates the participants passed)
 function tree(params_r,params_t){
   if (params_t.t === global.next[params_t.t] && params_r.r < global.k){
     params_r.r = global.k + global.a[params_t.t][0] - 1 - (global.k - params_r.r - 1) % global.a[params_t.t][0];
   } else {
     while (params_r.r < global.k){
       params_t.t = global.next[params_t.t];
       params_r.r = params_r.r + global.a[params_t.t][0];
     }
   }
 }

 // Size of the subtree rooted at node j of tree t (a live leaf counts as 1)
 function size(t,j){
   if (global.a[t][j] < 0)
     return 1;
   return global.a[t][j];
 }

 // Descend within the current tree to the leaf of the k-th remaining participant
 function leaf(params_r,params_t){
   let j = 0,
       nxt = params_r.r - global.k;

   while (j + 1 < global.m){
     let rs = size(params_t.t, 2*j + 2);

     if (params_r.r - rs < global.k){
       j = 2*j + 2;
     } else {
       j = 2*j + 1;
       params_r.r = params_r.r - rs;
     }
   }

   params_r.r = nxt;
   return j;
 }

 // Report the hit; take one life, or remove the participant when none are left
 function hit(i,j,params_r,params_t){
   let h = -global.a[params_t.t][j];

   console.log(h);

   if (global.l[h-1] > 1)
     global.l[h-1] = global.l[h-1] - 1;
   else
     kill(i,j,params_r,params_t);
 }

 // Erase the leaf, update the counts on the path to the root, and merge the
 // current tree with a neighbour when their combined size drops to m
 function kill(i,j,params_r,params_t){
   global.a[params_t.t][j] = 0;

   while (j > 0){
     j = Math.floor((j - 1) / 2);
     global.a[params_t.t][j] = global.a[params_t.t][j] - 1;
   }

   if (params_t.t !== global.next[params_t.t]){
     if (global.a[params_t.t][0] + global.a[global.next[params_t.t]][0] === global.m){
       params_r.r = params_r.r + global.a[global.next[params_t.t]][0];
       combine(params_t);
     } else if (global.a[params_t.t][0] + global.a[global.prev[params_t.t]][0] === global.m){
       t = global.prev[params_t.t];
       combine(params_t);
     }
   }
 }

 // Rebuild the current tree from the remaining leaves of itself and its
 // successor, then unlink the successor from the circular list
 function combine(params_t){
   let x = global.next[params_t.t],
       i = 0,
       u = [];

   for (let j=0; j<global.m; j++)
     if (global.a[params_t.t][global.m + j - 1] < 0){
       u[i] = global.a[params_t.t][global.m + j - 1];
       i = i + 1;
     }
   for (let j=0; j<global.m; j++)
     if (global.a[x][global.m + j - 1] < 0){
       u[i] = global.a[x][global.m + j - 1];
       i = i + 1;
     }
   build(u,global.a[params_t.t]);
   global.next[params_t.t] = global.next[global.next[params_t.t]];
   global.prev[global.next[params_t.t]] = params_t.t;
 }

 init(global);
 algorithmL(global.n, global.b);

(1) L. Wang and X. Wang, A Comparative Study on the Algorithms for a Generalized Josephus Problem, Applied Mathematics & Information Sciences, 7, No. 4, 1451-1457 (2013).

(2) References from Wilson's thesis (1979):

Knuth, D. E., The Art of Computer Programming, Addison-Wesley, Reading, Mass.; Vol. I: Fundamental Algorithms, 1968, Ex. 22, p. 158; Vol. III: Sorting and Searching, Ex. 2, pp. 18-19; Vol. I, 2nd ed., p. 181.

Arens, W., Mathematische Unterhaltungen und Spiele, Teubner: Leipzig, 1901, Chapter 15, 286-301.

Kaplansky, I. and Herstein, I. N., Matters Mathematical, Chelsea, New York, 1978, pp. 121-128.

+1
