How Spark Batch Reads and Writes HBase
This article gives a detailed walkthrough of how Spark batch reads and writes HBase. It is quite practical, so it is shared here for reference; hopefully you will get something out of it.
FileAna.scala
import java.net.URI
import java.util.UUID

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.{SparkConf, SparkContext}
// MAVLinkMessage, msg_mission_item, QuickParser and ByteAndHex come from the
// MAVLink Java library used by this project; import them from your own packages.

object FileAna {

  val hdfsPath = "hdfs://master:9000"
  val hdfs = FileSystem.get(new URI(hdfsPath), new Configuration())

  def main(args: Array[String]) {
    // Spark configuration: the application jar and the HBase client libraries
    // are shipped to the executors explicitly via setJars.
    val conf = new SparkConf().setAppName("FileAna").setMaster("spark://master:7077").
      set("spark.driver.host", "192.168.1.127").
      setJars(List("/home/pang/woozoomws/spark-service.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-common-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-client-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-protocol-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/htrace-core-3.1.0-incubating.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-server-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/metrics-core-2.2.0.jar"))
    val sc = new SparkContext(conf)

    // Read the raw message file from HDFS and turn every line into an HBase Put.
    val rdd = sc.textFile("hdfs://master:9000/woozoom/msgfile.txt")
    val rdd2 = rdd.map(x => convertToHbase(anaMavlink(x)))

    // Write the whole RDD to the "MissionItem" table in one batch job.
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.addResource("/home/hadoop/software/hbase-1.2.2/conf/hbase-site.xml")
    val jobConf = new JobConf(hbaseConf, this.getClass)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, "MissionItem")
    rdd2.saveAsHadoopDataset(jobConf)

    sc.stop()
  }

  def convertScanToString(scan: Scan) = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }

  // Turn a parsed MAVLink message into the (key, Put) pair expected by TableOutputFormat.
  def convertToHbase(msg: MAVLinkMessage) = {
    val p = new Put(Bytes.toBytes(UUID.randomUUID().toString()))
    if (msg.isInstanceOf[msg_mission_item]) {
      val missionItem = msg.asInstanceOf[msg_mission_item]
      p.addColumn(Bytes.toBytes("data"), Bytes.toBytes("x"), Bytes.toBytes(missionItem.x))
      p.addColumn(Bytes.toBytes("data"), Bytes.toBytes("y"), Bytes.toBytes(missionItem.y))
      p.addColumn(Bytes.toBytes("data"), Bytes.toBytes("z"), Bytes.toBytes(missionItem.z))
    }
    (new ImmutableBytesWritable, p)
  }

  // Decode a hex string back into bytes and parse it as a MAVLink message.
  val anaMavlink = (str: String) => {
    val bytes = ByteAndHex.hexStringToBytes(str)
    QuickParser.parse(bytes).unpack()
  }
}
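The write job above assumes that the target table already exists; saveAsHadoopDataset will fail if "MissionItem" with its "data" column family is missing. Below is a minimal sketch of creating that table with the HBase 1.2 client API. The object name CreateMissionItemTable is only an illustration; the hbase-site.xml path is taken from the job above.

import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory

object CreateMissionItemTable {
  def main(args: Array[String]): Unit = {
    val conf = HBaseConfiguration.create()
    conf.addResource("/home/hadoop/software/hbase-1.2.2/conf/hbase-site.xml")

    val connection = ConnectionFactory.createConnection(conf)
    val admin = connection.getAdmin
    try {
      val table = TableName.valueOf("MissionItem")
      if (!admin.tableExists(table)) {
        // One column family "data", matching the Puts built in convertToHbase above.
        val descriptor = new HTableDescriptor(table)
        descriptor.addFamily(new HColumnDescriptor("data"))
        admin.createTable(descriptor)
      }
    } finally {
      admin.close()
      connection.close()
    }
  }
}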
ReadHBase.scala
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.Base64
import org.apache.spark.{SparkConf, SparkContext}

object ReadHBase {

  val hdfsPath = "hdfs://master:9000"
  val hdfs = FileSystem.get(new URI(hdfsPath), new Configuration())

  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("ReadHBase").setMaster("spark://master:7077").
      set("spark.driver.host", "192.168.1.127").
      setJars(List("/home/pang/woozoomws/spark-service.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-common-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-client-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-protocol-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/htrace-core-3.1.0-incubating.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/hbase-server-1.2.2.jar",
        "/home/pang/woozoomws/spark-service/lib/hbase/metrics-core-2.2.0.jar"))
    val sc = new SparkContext(conf)

    // Point TableInputFormat at the "MissionItem" table and serialize the Scan
    // into the configuration, which is how the input format expects to receive it.
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.addResource("/home/hadoop/software/hbase-1.2.2/conf/hbase-site.xml")
    hbaseConf.set(TableInputFormat.INPUT_TABLE, "MissionItem")
    val scan = new Scan()
    hbaseConf.set(TableInputFormat.SCAN, convertScanToString(scan))

    // Each element of the resulting RDD is a (row key, Result) pair.
    val readRDD = sc.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    val count = readRDD.count()
    println("Mission Item Count:" + count)

    sc.stop()
  }

  // TableInputFormat.SCAN expects the Scan serialized to a Base64-encoded string.
  def convertScanToString(scan: Scan) = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }
}
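count() only confirms the rows are there; to use the stored coordinates you can map over the (ImmutableBytesWritable, Result) pairs and decode the cells. The following is a minimal sketch, assuming the x/y/z columns hold the float values produced by Bytes.toBytes(missionItem.x) in the write job; swap in a different Bytes decoder if your MAVLink fields have another type.

// Assumes readRDD from ReadHBase.main is in scope, plus
// import org.apache.hadoop.hbase.util.Bytes
val points = readRDD.map { case (rowKey, result) =>
  val x = Bytes.toFloat(result.getValue(Bytes.toBytes("data"), Bytes.toBytes("x")))
  val y = Bytes.toFloat(result.getValue(Bytes.toBytes("data"), Bytes.toBytes("y")))
  val z = Bytes.toFloat(result.getValue(Bytes.toBytes("data"), Bytes.toBytes("z")))
  (Bytes.toString(rowKey.get()), (x, y, z))
}
points.take(10).foreach(println)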
That is all for "How Spark Batch Reads and Writes HBase". Hopefully the content above is helpful and teaches you something new; if you found the article useful, please share it so more people can see it.