Complete tasks in parallel

I would like to run a large number of intensive processes in parallel, where I iterate over different parameters using loops. A number of answers to such questions mention that parallel processes can be performed using xargs, but none of them seem to mention if and how this can be done if the parameters are changed for each command.

As an example (pseudocode):

for paramA in 1 2 3
  for paramB in 1 2 3
    ./intensiveCommand $paramA $paramB
  end
end

I would like to parallelize intensiveCommand.

Or is it easier to use xargs?

+4
source share
4 answers

You can use GNU parallel. It has a --load option to avoid overloading the computer.

parallel --load 100% ./intensiveCommand ::: 1 2 3 ::: 1 2 3
+2
# Throttle the nested parameter loops so that at most $number_of_cores
# copies of intensiveCommand run at once, using SIGSTOP/SIGCONT on the
# script itself as a crude "wait for a free slot" mechanism.
number_of_cores=4   #<-- number of processorcores, in my case: 4

for paramA in 1 2 3
do
    for paramB in 1 2 3
    do
        #========== automatic load regulator ==================
        # Give freshly launched jobs a moment to show up in pgrep
        # before counting them.
        sleep 1
        # While all slots are busy, suspend this script; a finishing
        # job's "kill -SIGCONT $$" (below) resumes it.
        # NOTE(review): race — if SIGCONT arrives between the pgrep
        # check and kill -SIGSTOP, the script stops with nobody left
        # to wake it; acceptable for ad-hoc use, not for production.
        while [  $( pgrep -c "intensiveCommand" ) -ge "$number_of_cores" ]
        do
            kill -SIGSTOP $$
        done
        #======================================vvvvvvvvvvvvvvvv            

        # Run the job in a background subshell; when it finishes,
        # wake the (possibly stopped) parent script.
        # NOTE(review): $paramA/$paramB are unquoted — fine for the
        # numeric values used here, unsafe for params with spaces.
        ( ./intensiveCommand $paramA $paramB ; kill -SIGCONT $$ ) &

    done
done

The regulator suspends the main loop while every slot is busy, and each job that finishes wakes it up again (see the kill -SIGCONT $$). That way no more than number_of_cores instances of intensiveCommand ever run at the same time.

It is crude, but it works.

+1

Another approach: feed the parameter pairs through a file descriptor that acts as a work queue, one "paramA paramB" pair per line.

#!/bin/bash
#
# Run intensiveCommand for every parameter combination while keeping at
# most four instances running at once.  A process-substitution pipe on
# file descriptor 3 acts as a job queue: each line holds one
# "paramA paramB" pair, and four background reader loops pull work from
# it as they become free.

#use the filedescriptor as a kind of queue to fill the processing slots.

exec 3< <(

    for PARAM_A in 1 2 3
    do
        for PARAM_B in 1 2 3
        do
             echo "$PARAM_A" "$PARAM_B"
        done
    done
)

#4 separate processing slots running parallel
# NOTE: the command name and its arguments must be separate words —
# quoting the whole string ("intensiveCommand $PARA $PARB") would make
# bash look for a program literally named "intensiveCommand 1 1".
while read -u 3 PARA PARB; do intensiveCommand "$PARA" "$PARB" ; done &
while read -u 3 PARA PARB; do intensiveCommand "$PARA" "$PARB" ; done &
while read -u 3 PARA PARB; do intensiveCommand "$PARA" "$PARB" ; done &
while read -u 3 PARA PARB; do intensiveCommand "$PARA" "$PARB" ; done &

#only exit when 100% sure that all processes ended
while pgrep "intensiveCommand" &>"/dev/null" ; do wait ; done
+1

Here is a small tool I wrote, called core, that manages parallel execution by keeping a pool of core tokens in REDIS.

#!/bin/bash
################################################################################
# File: core
# Author: Mark Setchell
# 
# Primitive, but effective tool for managing parallel execution of jobs in the
# shell. Based on, and requiring REDIS.
#
# Usage:
#
# core -i 8 # Initialise to 8 cores, or specify 0 to use all available cores
# for i in {0..63}
# do
#   # Wait for a core, do a process, release core
#   (core -p; process; core -v)&
# done
# wait
################################################################################
# Print the usage summary and exit with failure.
# Outputs: usage text on stderr (diagnostics belong on stderr, not stdout).
# Returns: never returns; exits the script with status 1.
function usage {
    {
        echo "Usage: core -i ncores # Initialise with ncores. Use 0 for all cores."
        echo "       core -p        # Wait (forever) for free core."
        echo "       core -v        # Release core."
    } >&2
    exit 1
}

# Initialise the REDIS-backed core pool.
# Globals:   NCORES (read) - number of core tokens to create.
# Side effects: empties the "cores" list in REDIS, then pushes one
#               token per core.
# Returns:   never returns; exits the script with status 0.
function init {
    local i
    # Delete list of cores in REDIS
    echo DEL cores | redis-cli > /dev/null 2>&1
    for (( i = 1; i <= NCORES; i++ ))
    do
       # Add another core to list of cores in REDIS
       echo LPUSH cores 1 | redis-cli > /dev/null 2>&1
    done
    exit 0
}

# Block until a core becomes free, i.e. until a token can be popped
# from the "cores" list in REDIS (BLPOP with timeout 0 waits forever).
function WaitForCore {
    printf '%s\n' 'BLPOP cores 0' | redis-cli > /dev/null 2>&1
    exit 0
}

# Give a core back: push one token onto the "cores" list in REDIS so
# that a waiting WaitForCore call can proceed.
function ReleaseCore {
    printf '%s\n' 'LPUSH cores 1' | redis-cli > /dev/null 2>&1
    exit 0
}

################################################################################
# Main
################################################################################
# Dispatch on the command-line flag: -i initialises the pool,
# -p waits for a core, -v releases one.  Falls through to usage
# if no recognised option was given.
while getopts "i:pv" optname
do
    case "$optname" in
      "i")
        if [ "$OPTARG" -lt 1 ]; then
           # Auto-detect core count: sysctl on OSX, nproc on Linux.
           NCORES=$(sysctl -n hw.logicalcpu 2>/dev/null || nproc)
        else
           NCORES=$OPTARG
        fi
        init "$NCORES"
        ;;
      "p")
        WaitForCore
        ;;
      "v")
        ReleaseCore
        ;;
      "?")
        echo "Unknown option $OPTARG"
        ;;
    esac
done
usage

For example, the following runs 16 five-second jobs on 8 cores, so the whole batch finishes in about 10 seconds instead of 80:

# Demo: create 8 core tokens, then launch 16 five-second jobs.
# At most 8 run concurrently, so the batch takes ~10s instead of 80s.
core -i 8 
for i in {0..15}
do
   # Wait for a core, do a process, release core
   (core -p ; sleep 5 ; core -v)&
done
# Barrier: block until every background job has finished.
wait
0

All Articles