MapReduce implementation of the classic "people who viewed this item also viewed" application

2024-06-16 20:18

This article walks through a MapReduce implementation of the classic "people who viewed this item also viewed" recommendation feature. Hopefully it offers a useful reference for developers facing a similar problem.

Input:

date    cookie id    item id
xx      xx           xx

Output:

item id    list of item ids (sorted by priority, comma-separated)
xx         xx

For example:

id1    id3,id0,id4,id2
id2    id0,id5

The whole computation is divided into four steps.

1. Extract the date, cookie id and item id from the raw logs, processed per day, and for each (date, cookie) group emit every pair of items that cookie viewed. The output format is:

item id-0    item id-1
xx           xx

For example, if one cookie viewed id1, id3 and id0 on the same day, this step emits the pairs (id0,id1), (id0,id3) and (id1,id3).

One optimization here: item id-0 is always smaller than item id-1, which halves the storage; the pairs only need to be transposed back when the final result is assembled.

The reduce step does local sorting and de-duplication.

 

2、基于上次的结果做汇总,按天计算

商品id-0 商品id-1  关联值(关联值即同时访问这两个商品的用户数)

xx             x x                xx

 

3. Aggregate the last three months of data with time decay: the older a record, the less its association value contributes. Finally output the association value for every pair of items (including the transposed pair).
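To make the decay concrete: the third MR below uses factor = 1/(1+(day-1)/10), where day is the record's age in days. Assuming day >= 1, that gives:

day = 1   ->  factor = 1.00
day = 11  ->  factor = 0.50
day = 31  ->  factor = 0.25
day = 91  ->  factor = 0.10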

 

4. Row-to-column conversion to produce the final recommendation data, with each item's related items sorted by association value. This uses the classic secondary-sort pattern: the map key carries (item id, weight), a sort comparator orders weights descending, and a grouping comparator groups on the item id alone, so the reducer receives each item's neighbours already ranked.

 

First MR

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.log4j.Logger;

/*
 * Input: raw log data, may contain duplicates
 *   date  cookie  house id
 *
 * Output:
 *   date  house id-1  house id-2   // house id-1 is always smaller than house id-2;
 *                                  // records are grouped by (date, cookie)
 */
public class HouseMergeAndSplit {

    // Partition on (date, cookie) so that all records of one cookie on one day
    // reach the same reducer.
    public static class Partitioner1 extends Partitioner<TextPair, Text> {
        @Override
        public int getPartition(TextPair key, Text value, int numPartitions) {
            // Mask with Integer.MAX_VALUE to keep the hash non-negative;
            // Math.abs() can still return a negative value for Integer.MIN_VALUE,
            // which would yield an illegal partition number.
            return ((key.getFirst().toString() + key.getSecond().toString()).hashCode() * 127
                    & Integer.MAX_VALUE) % numPartitions;
        }
    }

    // Grouping comparator: group by (date, cookie) only, ignoring the house id.
    public static class Comp1 extends WritableComparator {
        public Comp1() {
            super(TextPair.class, true);
        }
        @SuppressWarnings("unchecked")
        public int compare(WritableComparable a, WritableComparable b) {
            TextPair t1 = (TextPair) a;
            TextPair t2 = (TextPair) b;
            int comp = t1.getFirst().compareTo(t2.getFirst());
            if (comp != 0)
                return comp;
            return t1.getSecond().compareTo(t2.getSecond());
        }
    }

    public static class TokenizerMapper
            extends Mapper<LongWritable, Text, TextPair, Text> {
        Text val = new Text("test"); // placeholder value; the reducer only looks at the keys
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String s[] = value.toString().split("\001"); // fields are \001-separated
            TextPair tp = new TextPair(s[0], s[1], s[4] + s[3]); // date, cookie, city+houseid
            context.write(tp, val);
        }
    }

    public static class IntSumReducer
            extends Reducer<TextPair, Text, Text, Text> {
        private static String comparedColumn[] = new String[3];
        ArrayList<String> houselist = new ArrayList<String>();
        private static Text keyv = new Text();
        private static Text valuev = new Text();
        static Logger logger = Logger.getLogger(HouseMergeAndSplit.class.getName());

        public void reduce(TextPair key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            houselist.clear();
            String thedate = key.getFirst().toString();
            String cookie = key.getSecond().toString();
            for (int i = 0; i < 3; i++)
                comparedColumn[i] = "";

            // (first, second) is the grouping key, so reduce is called once per
            // (date, cookie) group. As the values are iterated, the framework
            // advances the key, so key.getThree() walks through the sorted house
            // ids of this group; comparing against the previously seen columns
            // de-duplicates them.
            for (Text val : values) {
                if (thedate.equals(comparedColumn[0]) && cookie.equals(comparedColumn[1])
                        && !key.getThree().toString().equals(comparedColumn[2])) {
                    // same (date, cookie), new house id
                    houselist.add(key.getThree().toString());
                    comparedColumn[0] = key.getFirst().toString();
                    comparedColumn[1] = key.getSecond().toString();
                    comparedColumn[2] = key.getThree().toString();
                }
                if (!thedate.equals(comparedColumn[0]) || !cookie.equals(comparedColumn[1])) {
                    // first value of the group
                    houselist.add(key.getThree().toString());
                    comparedColumn[0] = key.getFirst().toString();
                    comparedColumn[1] = key.getSecond().toString();
                    comparedColumn[2] = key.getThree().toString();
                }
            }

            keyv.set(comparedColumn[0]); // the date
            // Emit every pair of distinct house ids this cookie viewed; houselist
            // is sorted, so the first id of each pair is always the smaller one.
            for (int i = 0; i < houselist.size() - 1; i++) {
                for (int j = i + 1; j < houselist.size(); j++) {
                    valuev.set(houselist.get(i) + "\t" + houselist.get(j)); // associated houses
                    context.write(keyv, valuev);
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: HouseMergeAndSplit <in> <out>");
            System.exit(2);
        }
        FileSystem fstm = FileSystem.get(conf);
        Path outDir = new Path(otherArgs[1]);
        fstm.delete(outDir, true); // remove a leftover output directory, if any
        conf.set("mapred.textoutputformat.separator", "\t"); // separator between key and value in the reduce output

        Job job = new Job(conf, "HouseMergeAndSplit");
        job.setNumReduceTasks(4);
        job.setJarByClass(HouseMergeAndSplit.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setMapOutputKeyClass(TextPair.class);
        job.setMapOutputValueClass(Text.class);
        // custom partitioner
        job.setPartitionerClass(Partitioner1.class);
        // group by (date, cookie) after partitioning
        job.setGroupingComparatorClass(Comp1.class);
        // reducer and its output types
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
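To try the job, a typical submission looks like the following (the jar name and HDFS paths are placeholders, not from the original article):

hadoop jar recommend.jar HouseMergeAndSplit /logs/viewlog/2024-06-16 /stage1/2024-06-16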

TextPair

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

// Composite key of three Text fields: (date, cookie, house id).
// The natural order compares field by field, so within one (date, cookie)
// group the house ids arrive at the reducer already sorted.
public class TextPair implements WritableComparable<TextPair> {
    private Text first;
    private Text second;
    private Text three;

    public TextPair() {
        set(new Text(), new Text(), new Text());
    }
    public TextPair(String first, String second, String three) {
        set(new Text(first), new Text(second), new Text(three));
    }
    public TextPair(Text first, Text second, Text three) {
        set(first, second, three);
    }
    public void set(Text first, Text second, Text three) {
        this.first = first;
        this.second = second;
        this.three = three;
    }
    public Text getFirst() {
        return first;
    }
    public Text getSecond() {
        return second;
    }
    public Text getThree() {
        return three;
    }
    public void write(DataOutput out) throws IOException {
        first.write(out);
        second.write(out);
        three.write(out);
    }
    public void readFields(DataInput in) throws IOException {
        first.readFields(in);
        second.readFields(in);
        three.readFields(in);
    }
    public int compareTo(TextPair tp) {
        int cmp = first.compareTo(tp.first);
        if (cmp != 0) {
            return cmp;
        }
        cmp = second.compareTo(tp.second);
        if (cmp != 0) {
            return cmp;
        }
        return three.compareTo(tp.three);
    }
}
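TextPair never overrides hashCode() and equals(); the first MR gets away with that because it installs its own Partitioner1. If you ever reuse TextPair with the default HashPartitioner, overrides along these lines would be needed (a sketch, not part of the original code):

@Override
public int hashCode() {
    return first.hashCode() * 163 + second.hashCode() * 31 + three.hashCode();
}

@Override
public boolean equals(Object o) {
    if (o instanceof TextPair) {
        TextPair tp = (TextPair) o;
        return first.equals(tp.first) && second.equals(tp.second) && three.equals(tp.three);
    }
    return false;
}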


TextPairSecond

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

// Composite key of (house id, weight), used by the fourth MR for secondary sort.
public class TextPairSecond implements WritableComparable<TextPairSecond> {
    private Text first;
    private FloatWritable second;

    public TextPairSecond() {
        set(new Text(), new FloatWritable());
    }
    public TextPairSecond(String first, float second) {
        set(new Text(first), new FloatWritable(second));
    }
    public TextPairSecond(Text first, FloatWritable second) {
        set(first, second);
    }
    public void set(Text first, FloatWritable second) {
        this.first = first;
        this.second = second;
    }
    public Text getFirst() {
        return first;
    }
    public FloatWritable getSecond() {
        return second;
    }
    public void write(DataOutput out) throws IOException {
        first.write(out);
        second.write(out);
    }
    public void readFields(DataInput in) throws IOException {
        first.readFields(in);
        second.readFields(in);
    }
    public int compareTo(TextPairSecond tp) {
        int cmp = first.compareTo(tp.first);
        if (cmp != 0) {
            return cmp;
        }
        return second.compareTo(tp.second);
    }
}

 

Second MR

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

/*
 * Count how often each pair of houses is viewed together.
 * Input:
 *   date  house1  house2
 *
 * Output:
 *   date  house1  house2  co-occurrence count
 */
public class HouseCount {

    public static class TokenizerMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {
        IntWritable iw = new IntWritable(1);
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The whole input line "date  house1  house2" is the key;
            // emit a 1 for each occurrence, word-count style.
            context.write(value, iw);
        }
    }

    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        IntWritable result = new IntWritable();
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable iw : values) {
                sum += iw.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: HouseCount <in> <out>");
            System.exit(2);
        }
        FileSystem fstm = FileSystem.get(conf);
        Path outDir = new Path(otherArgs[1]);
        fstm.delete(outDir, true); // remove a leftover output directory, if any
        conf.set("mapred.textoutputformat.separator", "\t"); // separator between key and value in the reduce output

        Job job = new Job(conf, "HouseCount");
        job.setNumReduceTasks(2);
        job.setJarByClass(HouseCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // reducer and its output types
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
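Since this job is essentially word count and integer addition is associative and commutative, the reducer could also double as a combiner to shrink the shuffle, an optional one-line addition to the job setup (not in the original):

job.setCombinerClass(IntSumReducer.class);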


Third MR

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

/*
 * Aggregate the co-occurrence counts of the last three months with a decay
 * factor (the older the record, the lower its contribution), and finally emit
 * each pair "a b" transposed as "b a" once as well.
 * Input:
 *   date  house1  house2  co-occurrence count
 *
 * Output:
 *   house1  house2  decayed co-occurrence count (the decay factor differs per day)
 */
public class HouseCountHz {

    public static class HouseCountHzMapper
            extends Mapper<LongWritable, Text, Text, FloatWritable> {
        Text keyv = new Text();
        FloatWritable valuev = new FloatWritable();
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] s = value.toString().split("\t");
            keyv.set(s[1] + "\t" + s[2]); // house1, house2
            Calendar today = Calendar.getInstance();
            Calendar recordDay = Calendar.getInstance();
            Date d = null;
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
            try {
                d = sdf.parse(s[0]);
            } catch (ParseException e) {
                e.printStackTrace();
            }
            recordDay.setTime(d);
            long now = today.getTimeInMillis();
            long then = recordDay.getTimeInMillis();
            // age of the record in days, relative to the day the job runs
            int day = (int) ((now - then) / (3600 * 24 * 1000));
            float factor = 1 / (1 + (float) (day - 1) / 10); // decay factor
            valuev.set(Float.parseFloat(s[3]) * factor);
            context.write(keyv, valuev);
        }
    }

    public static class HouseCountHzReducer
            extends Reducer<Text, FloatWritable, Text, FloatWritable> {
        FloatWritable result = new FloatWritable();
        Text keyreverse = new Text();
        public void reduce(Text key, Iterable<FloatWritable> values, Context context)
                throws IOException, InterruptedException {
            float sum = 0;
            for (FloatWritable iw : values) {
                sum += iw.get();
            }
            result.set(sum);
            // emit the pair in both orders so every house appears on the left once
            String[] keys = key.toString().split("\t");
            keyreverse.set(keys[1] + "\t" + keys[0]);
            context.write(key, result);
            context.write(keyreverse, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: HouseCountHz <in> <out>");
            System.exit(2);
        }
        FileSystem fstm = FileSystem.get(conf);
        Path outDir = new Path(otherArgs[1]);
        fstm.delete(outDir, true); // remove a leftover output directory, if any
        conf.set("mapred.textoutputformat.separator", "\t"); // separator between key and value in the reduce output

        Job job = new Job(conf, "HouseCountHz");
        job.setNumReduceTasks(2);
        job.setJarByClass(HouseCountHz.class);
        job.setMapperClass(HouseCountHzMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FloatWritable.class);
        // reducer and its output types
        job.setReducerClass(HouseCountHzReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FloatWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
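Note that the mapper trusts the input to cover only the last three months; nothing in the code enforces the window, so presumably the job is pointed at exactly the last 90 daily directories. If older data can slip in, a guard right after day is computed in map() would be needed, for example (my assumption, not in the original):

if (day > 90) {
    return; // ignore records older than three months
}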


Fourth MR

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

/*
 * Input:
 *   house1  house2  co-occurrence count
 *
 * Output:
 *   house1  house2,house3,house4  (sorted by count, descending)
 */
public class HouseRowToCol {

    // Partitioner: route by the house id alone. Partitioning must agree with
    // the grouping comparator below — if records for one house landed in
    // different reducers, its group would be split.
    public static class Partitioner1 extends Partitioner<TextPairSecond, Text> {
        @Override
        public int getPartition(TextPairSecond key, Text value, int numPartitions) {
            // Mask with Integer.MAX_VALUE to keep the hash non-negative;
            // Math.abs() can still return a negative value for Integer.MIN_VALUE.
            return (key.getFirst().toString().hashCode() * 127
                    & Integer.MAX_VALUE) % numPartitions;
        }
    }

    // Grouping comparator: group by house id only, so one reduce call sees all
    // of a house's neighbours.
    public static class Comp1 extends WritableComparator {
        public Comp1() {
            super(TextPairSecond.class, true);
        }
        @SuppressWarnings("unchecked")
        public int compare(WritableComparable a, WritableComparable b) {
            TextPairSecond t1 = (TextPairSecond) a;
            TextPairSecond t2 = (TextPairSecond) b;
            return t1.getFirst().compareTo(t2.getFirst());
        }
    }

    // Sort comparator: house id ascending, then weight descending, so the
    // reducer receives each house's neighbours ranked strongest first.
    public static class KeyComp extends WritableComparator {
        public KeyComp() {
            super(TextPairSecond.class, true);
        }
        @SuppressWarnings("unchecked")
        public int compare(WritableComparable a, WritableComparable b) {
            TextPairSecond t1 = (TextPairSecond) a;
            TextPairSecond t2 = (TextPairSecond) b;
            int comp = t1.getFirst().compareTo(t2.getFirst());
            if (comp != 0)
                return comp;
            return -t1.getSecond().compareTo(t2.getSecond()); // descending by weight
        }
    }

    public static class HouseRowToColMapper
            extends Mapper<LongWritable, Text, TextPairSecond, Text> {
        Text houseid1 = new Text();
        Text houseid2 = new Text();
        FloatWritable weight = new FloatWritable();
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String s[] = value.toString().split("\t");
            weight.set(Float.parseFloat(s[2]));
            houseid1.set(s[0]);
            houseid2.set(s[1]);
            TextPairSecond tp = new TextPairSecond(houseid1, weight);
            context.write(tp, houseid2);
        }
    }

    public static class HouseRowToColReducer
            extends Reducer<TextPairSecond, Text, Text, Text> {
        Text valuev = new Text();
        public void reduce(TextPairSecond key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            Text keyv = key.getFirst();
            // values arrive sorted by weight descending; join them with commas
            Iterator<Text> it = values.iterator();
            StringBuilder sb = new StringBuilder(it.next().toString());
            while (it.hasNext()) {
                sb.append(",").append(it.next().toString());
            }
            valuev.set(sb.toString());
            context.write(keyv, valuev);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: HouseRowToCol <in> <out>");
            System.exit(2);
        }
        FileSystem fstm = FileSystem.get(conf);
        Path outDir = new Path(otherArgs[1]);
        fstm.delete(outDir, true); // remove a leftover output directory, if any
        conf.set("mapred.textoutputformat.separator", "\t"); // separator between key and value in the reduce output

        Job job = new Job(conf, "HouseRowToCol");
        job.setNumReduceTasks(4);
        job.setJarByClass(HouseRowToCol.class);
        job.setMapperClass(HouseRowToColMapper.class);
        job.setMapOutputKeyClass(TextPairSecond.class);
        job.setMapOutputValueClass(Text.class);
        // custom partitioner
        job.setPartitionerClass(Partitioner1.class);
        // group by house id after partitioning
        job.setGroupingComparatorClass(Comp1.class);
        job.setSortComparatorClass(KeyComp.class);
        // reducer and its output types
        job.setReducerClass(HouseRowToColReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
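One practical refinement: the reducer concatenates every neighbour, so popular houses can produce very long output lines. Capping the list at a top N is a small change to the loop in HouseRowToColReducer (the limit of 20 below is my assumption, not from the article):

StringBuilder sb = new StringBuilder(it.next().toString());
int n = 1;
while (it.hasNext() && n < 20) { // keep only the 20 strongest neighbours
    sb.append(",").append(it.next().toString());
    n++;
}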




 

 

This concludes the article on implementing the classic "people who viewed this item also viewed" application with MapReduce. Hopefully it is of some help to fellow developers!



http://www.chinasem.cn/article/1067445
