How to efficiently split a large data.table into two according to a rule that involves 2 columns?

What is the most efficient way (time and space) to split the following table

dt = data.table(x=c(1,3,5,4,6,2), y=c(4,7,1,1,2,6))
> dt
   x y
1: 1 4
2: 3 7
3: 5 1
4: 4 1
5: 6 2
6: 2 6

into two separate tables, dt1 and dt2, such that dt1 contains every row (x, y) for which (y, x) is also a row of dt, and dt2 contains the remaining rows:

> dt1
   x y
1: 1 4
2: 4 1
3: 6 2
4: 2 6

> dt2
   x y
1: 3 7
2: 5 1

Efficiency is critical — the full table has nearly 200M rows.

+4
source share
3 answers

Another option is to join the table back to itself on the swapped columns:

# Self-join dt against itself with the columns swapped: `on = c(x = "y", y = "x")`
# matches each row's x against the other copy's y and vice versa, so a row is
# matched exactly when its mirror (y, x) exists in dt.  `which = TRUE` returns
# the matching row numbers instead of the joined rows, and `nomatch = 0L` drops
# rows with no mirror.  unique() on the i-side avoids repeated matches from
# duplicate rows; sort.int() restores original row order.
indx <- sort.int(dt[unique(dt), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L])

# Rows whose swapped pair (y, x) also occurs in dt
dt[indx]
#    x y
# 1: 1 4
# 2: 4 1
# 3: 6 2
# 4: 2 6

# All remaining rows
dt[-indx]
#    x y
# 1: 3 7
# 2: 5 1

Benchmark: if you don't need the result ordered, my solution seems to be faster for 200M rows (both solutions return the indices unordered):

# Reproducible 200M-row test data: x and y each drawn uniformly from 1..1000
set.seed(123)
bigdt <- data.table(x = sample(1e3, 2e8, replace = TRUE),
                    y = sample(1e3, 2e8, replace = TRUE))

# akrun's approach: group rows by the unordered pair (pmax, pmin); a group
# with .N > 1 contains a row together with its (y, x) mirror (or a repeat)
system.time(i1 <- bigdt[, .I[.N>1] ,.(X=pmax(x,y), Y=pmin(y,x))]$V1)
# user  system elapsed 
# 21.81    0.82   22.97 

# Self-join approach, unsorted variant (the trailing sort.int() omitted)
system.time(indx <- bigdt[unique(bigdt), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L])
#  user  system elapsed 
# 17.74    0.90   18.80 

# Checking if both unsorted and if identical when sorted
is.unsorted(i1)
# [1] TRUE
is.unsorted(indx)
# [1] TRUE

# Both approaches select the same set of row indices on this data
identical(sort.int(i1), sort.int(indx))
# [1] TRUE

And here is a non-degenerate case (where indx != bigdt[, .I], i.e. not every row has a swapped counterpart):

# Only 1e4 distinct values over 1e7 rows, so many rows have a swapped
# counterpart but not all — the split is non-trivial in both directions
set.seed(123)
n  = 1e7
nv = 1e4
DT <- data.table(x = sample(nv, n, replace = TRUE), y = sample(nv, n, replace = TRUE))

library(microbenchmark)
microbenchmark(
  # group by the unordered pair, keep row indices of groups of size > 1
  akrun  = {
    idx = DT[, .I[.N > 1], by=.(pmax(x,y), pmin(x,y))]$V1
    list(DT[idx], DT[-idx])
  },
  # build a character key "min max" per row and flag all duplicates
  # (fromLast = TRUE catches the first occurrence as well)
  akrun2 = {
    idx = DT[,{
      x1 <- paste(pmin(x,y), pmax(x,y))
      duplicated(x1)|duplicated(x1, fromLast=TRUE)
    }]
    list(DT[idx], DT[!idx])
  },
  # David Arenburg's self-join on the swapped columns
  davida = {
    idx = DT[unique(DT), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L]
    list(DT[idx], DT[-idx])
  },
  # count pair sizes via := (temporarily mutates DT, then cleans up) and
  # split() the table on whether the pair occurs more than once
  akrun3 = {
    n = DT[, N := .N, by = .(pmax(x,y), pmin(x,y))]$N
    DT[, N := NULL]
    split(DT, n > 1L)
  }, times = 1)

Unit: seconds
   expr       min        lq      mean    median        uq       max neval
  akrun  7.056609  7.056609  7.056609  7.056609  7.056609  7.056609     1
 akrun2 22.810844 22.810844 22.810844 22.810844 22.810844 22.810844     1
 davida  2.738918  2.738918  2.738918  2.738918  2.738918  2.738918     1
 akrun3  5.662700  5.662700  5.662700  5.662700  5.662700  5.662700     1
+5

# Group by the unordered pair (larger value, smaller value): any group with
# more than one member holds a row together with its (y, x) mirror.
# .I collects the original row numbers of those rows; $V1 extracts them.
i1 <- dt[, .I[.N>1] ,.(X=pmax(x,y), Y=pmin(y,x))]$V1
dt[i1]
#   x y
#1: 1 4
#2: 4 1
#3: 6 2
#4: 2 6

dt[-i1]
#   x y
#1: 3 7
#2: 5 1

Or, using duplicated:

 # Order-independent character key "min max" per row; a row belongs to dt1
 # exactly when its key occurs more than once.  duplicated() alone misses
 # the first occurrence, hence the OR with the fromLast = TRUE pass.
 i1 <-  dt[,{x1 <- paste(pmin(x,y), pmax(x,y))
         duplicated(x1)|duplicated(x1, fromLast=TRUE) }]
 dt[i1]
 dt[!i1]
+4

To follow up on the answers of @David Arenburg and @akrun (this is not an attempt at a new answer — it is just too long for a comment), I also timed the sorted variants of both solutions:

library(microbenchmark)
library(data.table)
library(dplyr)

# Same 200M-row data as in the benchmark above
set.seed(123)
bigdt <- data.table(x = sample(1e3, 2e8, replace = TRUE),
                    y = sample(1e3, 2e8, replace = TRUE))

# f1: akrun's grouping approach, unsorted indices
f1 <- function() bigdt[, .I[.N>1] ,.(X=pmax(x,y), Y=pmin(y,x))]$V1
# f2: same, sorted afterwards via data.table's setorder()
f2 <- function() bigdt[, .I[.N>1] ,.(X=pmax(x,y), Y=pmin(y,x))] %>% setorder(V1) %>% .[, V1]
# f3: David Arenburg's self-join, unsorted indices
f3 <- function() bigdt[unique(bigdt), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L]
# f4: same, sorted afterwards with sort.int()
f4 <- function() sort.int(bigdt[unique(bigdt), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L])

res <- microbenchmark(
    i1 <- f1(),
    i2 <- f2(),
    i3 <- f3(),
    i4 <- f4(),
    times = 2L)

print(res)

with the following sanity checks:

# The plain variants (f1, f3) come back unordered; the sorted ones do not
is.unsorted(i1)  # TRUE
is.unsorted(i2)  # FALSE
is.unsorted(i3)  # TRUE
is.unsorted(i4)  # FALSE

# All four approaches select the same set of row indices
identical(sort.int(i1), i2)  # TRUE
identical(sort.int(i3), i4)  # TRUE
identical(i2, i4)            # TRUE

And the results are as follows:

Unit: seconds
       expr      min       lq     mean   median       uq      max neval cld
 i1 <- f1() 21.18695 21.18695 21.42634 21.42634 21.66572 21.66572     2 a  
 i2 <- f2() 47.16270 47.16270 47.79535 47.79535 48.42799 48.42799     2  b 
 i3 <- f3() 19.67623 19.67623 20.11365 20.11365 20.55108 20.55108     2 a  
 i4 <- f4() 57.21732 57.21732 57.78666 57.78666 58.35600 58.35600     2   c

Summing up:

  • f3() is the fastest when an unsorted result is acceptable
  • f2() is faster once the cost of sorting is included.
+2
source

All Articles