[SparkR]

Introduction: a classic word-count example written with the SparkR RDD API. Note that from Spark 1.4 onward these RDD functions (textFile, flatMap, reduceByKey, ...) are no longer exported by SparkR, so this style mainly applies to older releases:

library(SparkR)

# Initialize SparkContext
sc <- sparkR.init(appName = "SparkR-wordcount-example")

# Read the input text file into an RDD of lines (the path here is illustrative)
data <- textFile(sc, "hdfs://hadoop-namenode1:8020/user/yyl/hoho")

# Split each line into words
words <- flatMap(data, function(line) {
  strsplit(line, " ")[[1]]
})

# Emit a (word, 1) pair for every word
wordCount <- lapply(words, function(word) {
  list(word, 1L)
})

# Sum the counts per word, using 2 partitions
counts <- reduceByKey(wordCount, "+", 2L)

# Collect the result to the driver and print it
output <- collect(counts)
for (pair in output) {
  cat(pair[[1]], ": ", pair[[2]], "\n")
}
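
In SparkR 1.6 the same word count can also be sketched with the public DataFrame API, using the SQL functions split and explode; the input path below is an assumption, reused from the HDFS layout that appears later in this article:

library(SparkR)
sc <- sparkR.init(appName = "SparkR-wordcount-df")
sqlContext <- sparkRSQL.init(sc)

# Each line of the input file becomes one row with a single "value" column
linesDF <- read.text(sqlContext, "hdfs://hadoop-namenode1:8020/user/yyl/hoho")

# Split every line into words, produce one row per word, then count per word
wordsDF <- selectExpr(linesDF, "explode(split(value, ' ')) AS word")
countsDF <- count(groupBy(wordsDF, "word"))

showDF(countsDF)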
#dataframe.R
library(SparkR)

# The spark-csv package must be on the classpath; SPARKR_SUBMIT_ARGS has to be
# set before the SparkContext is initialized
Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-csv_2.10:1.0.3" "sparkr-shell"')

# Initialize SparkContext and SQLContext
sc <- sparkR.init(appName="SparkR-DataFrame-example")
sqlContext <- sparkRSQL.init(sc)

# Shell command (run outside R) to remove any stale copy of the data on HDFS:
#   hadoop fs -rm /user/yyl/alldata.csv

# Create a DataFrame from the CSV file on HDFS (inferSchema makes the age
# column numeric so it can be compared against integers in the SQL below)
path <- file.path("hdfs://hadoop-namenode1:8020/user/yyl/alldata.csv")
peopleDF <- read.df(sqlContext, path, source = "com.databricks.spark.csv",
                    header = "true", inferSchema = "true")
printSchema(peopleDF)

# Register this DataFrame as a table.
registerTempTable(peopleDF, "people")

# SQL statements can be run by using the sql methods provided by sqlContext
teenagers <- sql(sqlContext, "SELECT name FROM people WHERE age >= 13 AND age <= 19")

# Call collect to get a local data.frame
teenagersLocalDF <- collect(teenagers)

# Print the teenagers in our dataset 
print(teenagersLocalDF)
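
As with the data-manipulation.R example below, the script can end by releasing the Spark context:

sparkR.stop()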
#data-manipulation.R
library(SparkR)
# Shell command (run outside R) to upload the sample data to HDFS:
#   hadoop dfs -copyFromLocal /opt/cloudera/parcels/spark-1.6.2-bin-cdh5/data/hoho /user/yyl
args <- commandArgs(trailing = TRUE)
# Provides access to a copy of the command line arguments supplied when this R session was invoked.

if (length(args) != 1) {
  print("Usage: data-manipulation.R <path-to-flights.csv")
  print("The data can be downloaded from: http://s3-us-west-2.amazonaws.com/sparkr-data/flights.csv ")
  q("no")
}

## Initialize SparkContext
sc <- sparkR.init(appName = "SparkR-data-manipulation-example")

## Initialize SQLContext
sqlContext <- sparkRSQL.init(sc)

flightsCsvPath <- args[[1]]

# Create a local R dataframe
flights_df <- read.csv(flightsCsvPath, header = TRUE)
flights_df$date <- as.Date(flights_df$date)

## Filter flights whose destination is San Francisco and write to a local data frame
SFO_df <- flights_df[flights_df$dest == "SFO", ] 

# Convert the local data frame into a SparkR DataFrame
SFO_DF <- createDataFrame(sqlContext, SFO_df)

#  Directly create a SparkR DataFrame from the source data
flightsDF <- read.df(sqlContext, flightsCsvPath, source = "com.databricks.spark.csv", header = "true")

# Print the schema of this Spark DataFrame
printSchema(flightsDF)

# Cache the DataFrame
cache(flightsDF)

# Print the first 6 rows of the DataFrame
showDF(flightsDF, numRows = 6) ## Or
head(flightsDF)

# Show the column names in the DataFrame
columns(flightsDF)

# Show the number of rows in the DataFrame
count(flightsDF)

# Select specific columns
destDF <- select(flightsDF, "dest", "cancelled")

# Using SQL to select columns of data
# First, register the flights DataFrame as a table
registerTempTable(flightsDF, "flightsTable")
destDF <- sql(sqlContext, "SELECT dest, cancelled FROM flightsTable")

# Use collect to create a local R data frame
local_df <- collect(destDF)

# Print the newly created local data frame
head(local_df)

# Filter flights whose destination is JFK
jfkDF <- filter(flightsDF, "dest = \"JFK\"") ##OR
jfkDF <- filter(flightsDF, flightsDF$dest == "JFK")
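
# Column expressions can also be combined with & and |; for example, flights to
# JFK that departed late (a small sketch reusing the dep_delay column aggregated below)
jfkDelayedDF <- filter(flightsDF, flightsDF$dest == "JFK" & flightsDF$dep_delay > 0)
head(jfkDelayedDF)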

# If the magrittr library is available, we can use it to
# chain data frame operations
if("magrittr" %in% rownames(installed.packages())) {
  library(magrittr)

  # Group the flights by date and then find the average daily delay
  # Write the result into a DataFrame
  groupBy(flightsDF, flightsDF$date) %>%
    summarize(avg(flightsDF$dep_delay), avg(flightsDF$arr_delay)) -> dailyDelayDF

  # Print the computed data frame
  head(dailyDelayDF)
}
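
# Note: without magrittr, the same aggregation can be written by nesting the calls:
#   dailyDelayDF <- summarize(groupBy(flightsDF, flightsDF$date),
#                             avg(flightsDF$dep_delay), avg(flightsDF$arr_delay))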

# Stop the SparkContext now
sparkR.stop()
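
Because the flights CSV is read through the com.databricks.spark.csv source, this script also needs the spark-csv package on the classpath, just like the dataframe.R example; a minimal sketch (assuming the script is launched from a plain R session) is to set SPARKR_SUBMIT_ARGS before sparkR.init:

Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-csv_2.10:1.0.3" "sparkr-shell"')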