Let's Talk Databases ~ SQL Basics (6)

Key points:

-- Convert an IP address to an integer
select inet_aton('43.226.128.3'); -- inet6_aton() for IPv6
-- Convert an integer back to an IP address
select inet_ntoa(736264195); -- inet6_ntoa() for IPv6
-- Concatenate several values into one string
select concat(user_id, ',', file_name, ',', ip, ',', url) as concat_str from file_records;
-- Concatenate several values into one string, specifying the separator just once
select concat_ws(',', user_id, file_name, ip, url) as concat_str from file_records;
-- In a query with group by, every column in the select list must either appear after group by (as a grouping key) or be wrapped in an aggregate function
-- group_concat(): concatenates the values within each group produced by group by and returns them as a single string
select group_concat(file_name) as file_name, url, count(*) from file_records group by url;
-- having filters the result of group by; where filters the original table
select group_concat(file_name) as file_name, group_concat(url) as url, count(*) as count from file_records group by url having count >= 3;
-- Round to the specified number of decimal places
select round(3.12345, 4);
-- To avoid losing precision, decimal values are usually stored as integers, e.g. 3.1415 ==> integer: 31415, scale factor: 10000
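The last note (storing decimals as scaled integers) is easier to see with a tiny round-trip. This is only a minimal sketch using the literal values from the comment above, not any of the sample tables:

-- Scale on write: 3.1415 with a factor of 10000 becomes the integer 31415
select cast(3.1415 * 10000 as signed) as stored_int;   -- 31415
-- Unscale on read: divide by the same factor to recover the original value
select round(31415 / 10000, 4) as recovered_value;     -- 3.1415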

Data setup

city_data.sql:https://github.com/lotapp/BaseCode/blob/master/database/SQL/city2017.sql

-- Columns: id, file name, file MD5, meta (media type), current user, request IP, source URL, request time, data status
drop table if exists file_records;
create table if not exists file_records (
    id int unsigned auto_increment primary key,
    file_name varchar(100) not null,
    md5 char(32) not null,
    meta_type tinyint unsigned not null default 1,
    user_id int unsigned not null,
    ip int unsigned not null,
    url varchar(200) not null default 'http://www.likecs.com/',
    createtime datetime not null, -- default now(),
    datastatus tinyint not null default 0
);
-- Run the insert 2~3 times (handy for the demos below)
insert into file_records(file_name, md5, meta_type, user_id, ip, url, createtime, datastatus) values
('2.zip', '3aa2db9c1c058f25ba577518b018ed5b', 2, 1, inet_aton('43.226.128.3'), 'http://baidu.com', now(), 1),
('3.rar', '6f401841afd127018dad402d17542b2c', 3, 3, inet_aton('43.224.12.3'), 'http://qq.com', now(), 1),
('7.jpg', 'fe5df232cafa4c4e0f1a0294418e5660', 4, 5, inet_aton('58.83.17.3'), 'http://360.cn', now(), 1),
('9.png', '7afbb1602613ec52b265d7a54ad27330', 5, 4, inet_aton('103.3.152.3'), 'http://cnblogs.com', now(), 1),
('1.gif', 'b5e9b4f86ce43ca65bd79c894c4a924c', 6, 3, inet_aton('114.28.0.3'), 'http://qq.com', now(), 1),
('大马.jsp', 'abbed9dcc76a02f08539b4d852bd26ba', 9, 4, inet_aton('220.181.108.178'), 'http://baidu.com', now(), 99);
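To sanity-check the rows you just inserted, you can convert the integer ip column back to dotted notation on the way out. A minimal sketch against the file_records table created above:

-- Read the rows back, turning the stored integer IPs into readable dotted strings
select id, file_name, inet_ntoa(ip) as ip, url, datastatus
from file_records
order by id;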

4.2. SQLServer

Execution order of a select statement (an annotated example follows this list):

from table

join-type join table on condition

where condition

filters the rows produced by from/join

group by column(s)

groups the filtered rows by the specified column(s) so that aggregates can be computed per group

having condition

filters the grouped/aggregated (group by) result

select distinct column(s)

order by column(s) [asc|desc]

sorts the result by the specified column(s) (ascending, asc, by default)

top n rows

comparable to MySQL's limit, and applied last
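To tie the steps together, here is a sketch of a single query with each clause annotated with its logical position. It reuses the file_records and users tables from the earlier sections purely for illustration; adjust the names to whatever tables you actually have in SQL Server:

select distinct top 5 url, count(*) as cnt             -- 5) select distinct, 7) top is applied last
from file_records                                      -- 1) from
inner join users on users.id = file_records.user_id   -- 1) join ... on
where file_records.datastatus = 1                      -- 2) where filters the joined rows
group by url                                           -- 3) group by
having count(*) >= 2                                   -- 4) having filters the groups
order by cnt desc;                                     -- 6) order by (asc by default)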

Insert / Update / Delete

-- 4.1. Insert (help insert)
-- Auto-increment primary keys and columns with defaults can be omitted
insert into dotnetcrazy.dbo.users(username, password, email, tel, usercode, createtime, updatetime, datastatus)
values ('dnt', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'dnt@qq.com', '18738002038', newid(), getdate(), getdate(), 1);
-- Batch insert: SQL Server accepts at most about 1000 rows per insert ... values statement
insert into dotnetcrazy.dbo.users(username, password, email, tel, usercode, createtime, updatetime, datastatus)
values ('xxx', '7c4a8d09ca3762af61e59520943dc26494f8942b', 'xxx@qq.com', '13738002038', newid(), getdate(), getdate(), 0),
('mmd', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'mmd@qq.com', '13738002038', newid(), getdate(), getdate(), 1),
('小明', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'xiaoming@qq.com', '13718002038', newid(), getdate(), getdate(), 1),
('小张', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'zhang@qq.com', '13728002038', newid(), getdate(), getdate(), 1),
('小潘', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'pan@qq.com', '13748002038', newid(), getdate(), getdate(), 1),
('小周', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'zhou@qq.com', '13758002038', newid(), getdate(), getdate(), 1),
('小罗', '7c4a8d09ca3762af61e59520943dc26494f8941b', 'luo@qq.com', '13768002038', newid(), getdate(), getdate(), 1);

-- 4.2. Update (help update)
update dotnetcrazy.dbo.users set datastatus = 99, updatetime = getdate() where username = 'mmd';
-- Always include a where clause! In practice, write the where condition first, then the rest of the update

-- 4.3. Delete
-- Delete rows (the auto-increment counter is NOT reset): help delete
delete from dotnetcrazy.dbo.users where datastatus = 0;
-- Delete all rows (the auto-increment counter IS reset): help truncate
truncate table dotnetcrazy.dbo.users;

Queries

-- Source urls (deduplicated)
select distinct url from file_records;
-- Source urls (via grouping)
select url from file_records group by url;
-- Count how many times each url appears (group by + aggregate)
-- group by is almost always used together with aggregate functions
select url, count(*) as count from file_records group by url;
-- Count how many times each url appears, keeping only urls that show up more than 3 times
select url, count(*) as count from file_records group by url having count(*) > 3;
-- having filters on the group by result; ★in SQL Server you cannot use the alias count here★
-- Count how many times each url appears and list the matching ids
-- SQL Server 2017 adds string_agg; on earlier versions use stuff + for xml path
select ids = (select stuff((select ',' + cast(id as varchar(20)) from file_records as f where f.url = file_records.url for xml path ('')), 1, 1, '')),
       url
from file_records
group by url;
-- Inner join: inner join tb_name on join condition
select file_records.id, users.id as uid, users.username, users.email, file_records.file_name, file_records.md5, file_records.ip, file_records.url
from users
inner join file_records on file_records.user_id = users.id -- join condition
where users.datastatus = 1 and file_records.datastatus = 1
order by file_records.file_name desc; -- sort by file name, descending
-- First 5 rows
select top 5 * from file_records;
-- Paging: page 3, 5 rows per page
select *
from (select row_number() over (order by username desc, file_name desc) as id,
             file_records.id as fid, users.id as uid, users.username, users.email,
             file_records.file_name, file_records.md5, file_records.ip, file_records.url
      from file_records
      inner join users on file_records.user_id = users.id) as temp
where id > (3 - 1) * 5 and id <= 3 * 5;
-- A quick note on views:
-- Drop the view if it already exists
if exists(select * from sysobjects where name = N'view_userinfo')
begin
    drop view view_userinfo
end
-- Create the view
create view view_userinfo as select id, username, password, email, tel, datastatus from users;
-- Query the view
select id, username, password, email, tel, datastatus from view_userinfo;
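Two newer alternatives to the tricks above, sketched against the same tables and the same page numbers (string_agg requires SQL Server 2017+, as noted above; offset ... fetch requires SQL Server 2012+):

-- ids per url with string_agg instead of stuff + for xml path (SQL Server 2017+)
select url,
       string_agg(cast(id as varchar(20)), ',') within group (order by id) as ids,
       count(*) as count
from file_records
group by url;

-- Paging with offset ... fetch instead of row_number() (SQL Server 2012+): page 3, 5 rows per page
select users.id as uid, users.username, file_records.file_name, file_records.url
from file_records
inner join users on file_records.user_id = users.id
order by users.username desc, file_records.file_name desc
offset (3 - 1) * 5 rows fetch next 5 rows only;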

Appendix
