Below is a screenshot of the Spark web UI running on port 8080:

The "Shuffle Read" and "Shuffle Write" options are always empty for this code:
import org.apache.spark.SparkContext

object first {
  println("Welcome to the Scala worksheet")

  val conf = new org.apache.spark.SparkConf()
    .setMaster("local")
    .setAppName("distances")
    .setSparkHome("C:\\spark-1.1.0-bin-hadoop2.4\\spark-1.1.0-bin-hadoop2.4")
    .set("spark.executor.memory", "2g")

  val sc = new SparkContext(conf)

  // Euclidean distance between two users' feature vectors,
  // returned together with the pair of user names as the key.
  def euclDistance(userA: User, userB: User) = {
    val subElements = (userA.features zip userB.features) map {
      m => (m._1 - m._2) * (m._1 - m._2)
    }
    val summed = subElements.sum
    val sqRoot = Math.sqrt(summed)
    println("value is " + sqRoot)
    ((userA.name, userB.name), sqRoot)
  }

  case class User(name: String, features: Vector[Double])

  // First CSV field is the user id, the remaining fields are the feature values.
  def createUser(data: String) = {
    val splitLine = data.split(",")
    val id = splitLine(0)
    val distanceVector = (splitLine.toList match {
      case h :: t => t
    }).map(m => m.toDouble).toVector
    User(id, distanceVector)
  }

  val dataFile = sc.textFile("c:\\data\\example.txt")
  val users = dataFile.map(createUser)
  val cart = users.cartesian(users)
  val distances = cart.map(m => euclDistance(m._1, m._2))
                                      //> distances : org.apache.spark.rdd.RDD[((String, String), Double)] = MappedR
                                      //| DD[4] at map at first.scala:46
  val d = distances.collect
  d.foreach(println)                  //> ((a,a),0.0)
                                      //| ((a,b),0.0)
                                      //| ((a,c),1.0)
                                      //| ((a,),0.0)
                                      //| ((b,a),0.0)
                                      //| ((b,b),0.0)
                                      //| ((b,c),1.0)
                                      //| ((b,),0.0)
                                      //| ((c,a),1.0)
                                      //| ((c,b),1.0)
                                      //| ((c,c),0.0)
                                      //| ((c,),0.0)
                                      //| ((,a),0.0)
                                      //| ((,b),0.0)
                                      //| ((,c),0.0)
                                      //| ((,),0.0)
}
Why are the "Shuffle Read" and "Shuffle Write" fields empty? Can the code be configured to fill in these fields to understand how