Web scraping several links with similar URLs in R, using a for or lapply loop

This code pulls, from http://www.bls.gov/schedule/news_release/2015_sched.htm, every date whose Release column contains the Employment Situation release:

library(rvest)

pg <- read_html("http://www.bls.gov/schedule/news_release/2015_sched.htm")

# target only the <td> elements under the bodytext div
body <- html_nodes(pg, "div#bodytext")

# we use this new set of nodes and a relative XPath to get the initial <td> elements, then get their siblings
es_nodes <- html_nodes(body, xpath=".//td[contains(., 'Employment Situation for')]/../td[1]")

# clean up the cruft and make our dates!
nfpdates2015 <- as.Date(trimws(html_text(es_nodes)), format="%A, %B %d, %Y")

###thanks @hrbrmstr for this###

I would like to repeat this for the equivalent pages for other years; the URL is identical except for the year number. In particular, for the following URLs:

#From 2008 to 2015
http://www.bls.gov/schedule/news_release/2015_sched.htm
http://www.bls.gov/schedule/news_release/2014_sched.htm
...
http://www.bls.gov/schedule/news_release/2008_sched.htm

I am using rvest to parse the HTML/XML. Rather than repeating the 2015 code for each year, how can I loop over all of the years with for or lapply?


You can build the url with a paste0 statement inside a for loop:

for(i in 2008:2015){

  url <- paste0("http://www.bls.gov/schedule/news_release/", i, "_sched.htm")
  pg <- read_html(url)

  ## all your other code goes here.

}
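As written, anything computed inside the loop is overwritten on the next iteration, so if you want to keep each year's dates you can collect them in a pre-allocated list. A minimal sketch (the object name results is my own choice):

library(rvest)

# pre-allocate one slot per year and name it, so the results survive the loop
results <- vector("list", length(2008:2015))
names(results) <- 2008:2015

for(i in 2008:2015){
  url <- paste0("http://www.bls.gov/schedule/news_release/", i, "_sched.htm")
  pg <- read_html(url)
  body <- html_nodes(pg, "div#bodytext")
  es_nodes <- html_nodes(body, xpath=".//td[contains(., 'Employment Situation for')]/../td[1]")
  # store this year's dates under its year
  results[[as.character(i)]] <- as.Date(trimws(html_text(es_nodes)), format="%A, %B %d, %Y")
}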

Or with lapply, which returns the dates for each year as a list:

lst <- lapply(2008:2015, function(x){
  url <- paste0("http://www.bls.gov/schedule/news_release/", x, "_sched.htm")

  ## all your other code goes here.
  pg <- read_html(url)

  # target only the <td> elements under the bodytext div
  body <- html_nodes(pg, "div#bodytext")

  # we use this new set of nodes and a relative XPath to get the initial <td> elements, then get their siblings
  es_nodes <- html_nodes(body, xpath=".//td[contains(., 'Employment Situation for')]/../td[1]")

  # clean up the cruft and make our dates!
  nfpdates <- as.Date(trimws(html_text(es_nodes)), format="%A, %B %d, %Y")
  return(nfpdates)
})

 lst
[[1]]
 [1] "2008-01-04" "2008-02-01" "2008-03-07" "2008-04-04" "2008-05-02" "2008-06-06" "2008-07-03" "2008-08-01" "2008-09-05"
[10] "2008-10-03" "2008-11-07" "2008-12-05"

[[2]]
 [1] "2009-01-09" "2009-02-06" "2009-03-06" "2009-04-03" "2009-05-08" "2009-06-05" "2009-07-02" "2009-08-07" "2009-09-04"
[10] "2009-10-02" "2009-11-06" "2009-12-04"

## etc...
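If you want all release dates in one vector rather than a list, you can name the elements by year and flatten them; note that do.call(c, ...) keeps the Date class, whereas unlist() would coerce the dates to numbers. A small sketch:

# name each list element by its year, then combine into a single Date vector
names(lst) <- 2008:2015
all_dates <- do.call(c, lst)   # c() dispatches to c.Date, so the Date class is preserved
head(all_dates)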

You can also create the vector of URLs with sprintf and then loop over it with lapply:

url <- sprintf("http://www.bls.gov/schedule/news_release/%d_sched.htm", 2008:2015)
url
#[1] "http://www.bls.gov/schedule/news_release/2008_sched.htm" "http://www.bls.gov/schedule/news_release/2009_sched.htm"
#[3] "http://www.bls.gov/schedule/news_release/2010_sched.htm" "http://www.bls.gov/schedule/news_release/2011_sched.htm"
#[5] "http://www.bls.gov/schedule/news_release/2012_sched.htm" "http://www.bls.gov/schedule/news_release/2013_sched.htm"
#[7] "http://www.bls.gov/schedule/news_release/2014_sched.htm" "http://www.bls.gov/schedule/news_release/2015_sched.htm"

library(rvest)
lst <-  lapply(url, function(x) {

   pg <- read_html(x)
   body <- html_nodes(pg, "div#bodytext")
   es_nodes <- html_nodes(body, xpath=".//td[contains(., 'Employment Situation for')]/../td[1]")

   nfpdates <- as.Date(trimws(html_text(es_nodes)), format="%A, %B %d, %Y")
   nfpdates
  })

head(lst, 3)
#[[1]]
# [1] "2008-01-04" "2008-02-01" "2008-03-07" "2008-04-04" "2008-05-02" "2008-06-06" "2008-07-03" "2008-08-01"
# [9] "2008-09-05" "2008-10-03" "2008-11-07" "2008-12-05"

#[[2]]
# [1] "2009-01-09" "2009-02-06" "2009-03-06" "2009-04-03" "2009-05-08" "2009-06-05" "2009-07-02" "2009-08-07"
# [9] "2009-09-04" "2009-10-02" "2009-11-06" "2009-12-04"

#[[3]]
# [1] "2010-01-08" "2010-02-05" "2010-03-05" "2010-04-02" "2010-05-07" "2010-06-04" "2010-07-02" "2010-08-06"
# [9] "2010-09-03" "2010-10-08" "2010-11-05" "2010-12-03"
