每周都要手动做,有点小头疼,虽然学习收集这些数据的方法固然重要,但是如果掌握了后就希望能把时间节省下来,用在分析这些数据上。加上自己有点小开发经验,于是使用SHELL,AWK各种咚咚,现用现学,狠狠整了3天终于搞定了,通过联邦,把所有的需要分析的数据都拿到本地入库。有点小成就感哦~
以下分别描述这三个部分
*********************************************check_weekly.sh***********************************************************************************
#author:wangpeng
#date:2012-04-10
#purpose:the main program of weekly check
#Current date key, format 20120411; used as DATE_KEY in every WEEKLY table.
date=`date +%Y%m%d`
#NOTE: the newline escapes were lost in the original post ("n" instead of "\n").
printf "\n--------------------Begin Collection $date--------------------\n">>"check_weekly.log"
#Local catalog aliases of the databases to collect, separated by spaces.
a=(SAMPLE)
#Matching user/password pairs for those databases, separated by spaces.
b=(db2inst1 db2inst1)
db2 connect to check
#Phase 1: loop over the databases and collect data through the federated nicknames.
printf "\n第一阶段,收集各个数据库的视图信息\n">>"check_weekly.log"
num=${#a[*]}
for((i=0;i<num;i++)){
printf "\n${a[i]}\n">>"check_weekly.log";
db2 "INSERT INTO WEEKLY.DBM_CFG SELECT $date,'${a[i]}',NAME,VALUE,VALUE_FLAGS FROM ${a[i]}_DBMCFG">>"check_weekly.log";
db2 "INSERT INTO WEEKLY.DB_CFG SELECT $date,'${a[i]}',NAME,VALUE,VALUE_FLAGS FROM ${a[i]}_DBCFG">>"check_weekly.log";
db2 "INSERT INTO WEEKLY.TBSP_SIZE SELECT $date,'${a[i]}',A.TBSP_ID,A.TBSP_NAME,A.TBSP_TYPE,A.TBSP_CONTENT_TYPE,A.TBSP_PAGE_SIZE,B.TBSP_TOTAL_PAGES,B.TBSP_USABLE_PAGES,B.TBSP_USED_PAGES,B.TBSP_FREE_PAGES FROM ${a[i]}_TBSP A,${a[i]}_TBSP_PART B WHERE A.TBSP_ID=B.TBSP_ID">>"check_weekly.log";
db2 "INSERT INTO WEEKLY.TB_SIZE select $date,'${a[i]}',TBSPACEID,TBSPACE,TABLEID,TABNAME,NPAGES from ${a[i]}_TB where TBSPACE not in('SYSCATSPACE','SYSTOOLSPACE') or (tbspace is null and npages<> -1) order by TBSPACEID,TABLEID">>"check_weekly.log";
db2 "INSERT INTO WEEKLY.BP_SIZE SELECT $date,'${a[i]}',BP_NAME,DATA_LOGICAL_READS,DATA_PHYSICAL_READS,INDEX_LOGICAL_READS,INDEX_PHYSICAL_READS,CASE WHEN (DATA_LOGICAL_READS+DATA_PHYSICAL_READS+INDEX_LOGICAL_READS+INDEX_PHYSICAL_READS)=0 THEN 0 ELSE (DATA_LOGICAL_READS+INDEX_LOGICAL_READS)*1.0/(DATA_LOGICAL_READS+DATA_PHYSICAL_READS+INDEX_LOGICAL_READS+INDEX_PHYSICAL_READS)*100 END FROM ${a[i]}_BP">>"check_weekly.log";
db2 "INSERT INTO WEEKLY.LOCKS SELECT $date,'${a[i]}',DEADLOCKS,LOCK_WAITS,LOCK_WAIT_TIME,LOCK_TIMEOUTS,TOTAL_SORTS,SORT_OVERFLOWS FROM ${a[i]}_SNAPDB">>"check_weekly.log";
}
db2 terminate
#Phase 2: collect REORGCHK output. REORGCHK is only available as a command /
#function (it cannot go through federation), so its output is landed in flat
#files and imported afterwards.
#Loop over and connect to every monitored database.
for((i=0,j=0;i<num;i++,j+=2)){
db2 connect to ${a[i]} user ${b[j]} using ${b[j+1]}
#Pipe the REORGCHK result set through awk, which splits it into two
#delimited files: reorgchk_tab.del and reorgchk_ind.del.
printf "\n开始${a[i]}的REORGCHK,并修改数据格式">>"check_weekly.log"
db2 reorgchk current statistics|awk -f reorgchk_97.awk
db2 terminate
#Import the landed files into the CHECK database.
db2 connect to check
printf "\n开始${a[i]}的REORGCHK结果集入库\n">>"check_weekly.log"
#Import only when the file exists and is non-empty (-s). The original
#"if[! -f ...] then" was both malformed (missing spaces/';') and inverted.
if [ -s "reorgchk_tab.del" ]; then
db2 "import from reorgchk_tab.del of del messages import_tab.msg insert into WEEKLY.REORGCHK_TAB(TABNAME,CARD,OV,NP,FP,ACTBLK,TSIZE,F1,F2,F3,REORG)">>"check_weekly.log"
#Post-process the freshly imported rows: split "schema.table" into two columns.
db2 "update WEEKLY.REORGCHK_TAB set DATE_KEY=$date,DB_NAME='${a[i]}',TABLE_SCHEMA=substr(TABNAME,1,posstr(TABNAME,'.')-1),TABNAME=substr(TABNAME,posstr(TABNAME,'.')+1,length(TABNAME)-posstr(TABNAME,'.')) WHERE DATE_KEY IS NULL">>"check_weekly.log"
else
printf "\nreorgchk_tab.del为空">>"check_weekly.log"
fi
#The original re-checked reorgchk_tab.del here (copy-paste bug); the index
#import must be guarded by reorgchk_ind.del.
if [ -s "reorgchk_ind.del" ]; then
db2 "import from reorgchk_ind.del of del messages import_ind.msg insert into WEEKLY.REORGCHK_IND(TABNAME,INDNAME,INDCARD,LEAF,ELEAF,LVLS,NDEL,KEYS,LEAF_RECSIZE,NLEAF_RECSIZE,LEAF_PAGE_OVERHEAD,NLEAF_PAGE_OVERHEAD,PCT_PAGES_SAVED,F4,F5,F6,F7,F8,REORG)">>"check_weekly.log"
#Split "schema.table" and "schema.index". INDEX_SCHEMA/INDNAME must be
#derived from INDNAME (the original wrongly parsed TABNAME for both);
#SET expressions read the pre-update row values, so this is safe in one
#statement.
db2 "update WEEKLY.REORGCHK_IND set DATE_KEY=$date,DB_NAME='${a[i]}',TABLE_SCHEMA=substr(TABNAME,1,posstr(TABNAME,'.')-1),TABNAME=substr(TABNAME,posstr(TABNAME,'.')+1,length(TABNAME)-posstr(TABNAME,'.')),INDEX_SCHEMA=substr(INDNAME,1,posstr(INDNAME,'.')-1),INDNAME=substr(INDNAME,posstr(INDNAME,'.')+1,length(INDNAME)-posstr(INDNAME,'.')) WHERE DATE_KEY IS NULL">>"check_weekly.log"
else
printf "\nreorgchk_ind.del为空">>"check_weekly.log"
fi
#Truncate reorgchk_tab.del and reorgchk_ind.del for the next database.
>reorgchk_tab.del
>reorgchk_ind.del
db2 terminate
}
printf "\n--------------------END Collection $date--------------------\n\n\n\n">>"check_weekly.log"
*************************************************check_weekly.sh*******************************************************************************
***********************************************reorgchk_97.awk*********************************************************************************
#author:wangpeng
#date:2012-04-10
#purpose:filter reorgchk's result
#REORGCHK output has two parts per record: table stats and index stats.
#Records are split on the literal header "Table:" (a multi-character RS
#requires gawk; plain awk treats RS as a single character).
BEGIN{RS="Table:"}
{
#Part 1: a table row has 11 fields; keep only rows whose REORG column is
#flagged with '*'. The '*' must be escaped — a bare /*/ is an invalid ERE.
if ( NF == 11 && $NF ~ /\*/)
printf $1","$2","$3","$4","$5","$6","$7","$8","$9","$10","$11"\n">"reorgchk_tab.del";
#Part 2: index rows (19 fields each, the first starting at field 20);
#keep flagged ones together with the owning table name ($1).
else if ( NF >= 20){
for(i=20;i<=NF;i+=19){
if($i ~ /\*/){
#Ignore per-partition indexes of partitioned tables.
if($(i-18)!~"Partition"){
#Emit the owning table name first.
printf $1",">"reorgchk_ind.del";
for(j=i-17;j<=i;j++){
#Print each field comma separated for the import; no trailing comma.
printf $j>"reorgchk_ind.del";
if(j!=i)printf ",">"reorgchk_ind.del";
}
printf "\n">"reorgchk_ind.del";
}
}
}
}
}
*************************************************reorgchk_97.awk*******************************************************************************
*************************************************init.sql*******************************************************************************
-- Catalog the remote node and database on the local instance.
db2 catalog tcpip node db2inst1 remote 10.19.21.12 server 60000
db2 catalog db sample as sample at node db2inst1
-- Create the federation objects: wrapper, server, user mapping, nicknames.
create wrapper drda;
-- DBNAME must not carry stray blanks ('SAMPLE', not ' SAMPLE ').
create server SAMPLE type db2/udb version '9.7' wrapper drda authid "db2inst1" password "db2inst1" options(add dbname 'SAMPLE');
-- The mapping must reference the server defined above (SAMPLE; the original
-- said "server ubtdb", which does not exist in this script).
create user mapping for db2inst1 server SAMPLE options(add remote_authid 'db2inst1',add remote_password 'db2inst1');
create nickname SAMPLE_DBMCFG for SAMPLE.SYSIBMADM.DBMCFG;
create nickname SAMPLE_DBCFG for SAMPLE.SYSIBMADM.DBCFG;
CREATE NICKNAME SAMPLE_TBSP FOR SAMPLE.SYSIBMADM.SNAPTBSP;
CREATE NICKNAME SAMPLE_TBSP_PART FOR SAMPLE.SYSIBMADM.SNAPTBSP_PART;
CREATE NICKNAME SAMPLE_TB FOR SAMPLE.SYSCAT.TABLES;
CREATE NICKNAME SAMPLE_BP FOR SAMPLE.SYSIBMADM.BP_HITRATIO;
CREATE NICKNAME SAMPLE_SNAPDB FOR SAMPLE.SYSIBMADM.SNAPDB;
-- Weekly snapshot of database manager (instance) configuration, loaded from
-- the *_DBMCFG nicknames; keyed by DATE_KEY (YYYYMMDD) + DB_NAME.
DROP TABLE WEEKLY.DBM_CFG;
CREATE TABLE WEEKLY.DBM_CFG(
DATE_KEY INT,
DB_NAME VARCHAR(20),
NAME VARCHAR(30),
VALUE VARCHAR(50),
VALUE_FLAGS VARCHAR(10)
)IN USERSPACE1 INDEX IN IXSPACE;
-- Weekly snapshot of database configuration, loaded from the *_DBCFG nicknames.
DROP TABLE WEEKLY.DB_CFG ;
CREATE TABLE WEEKLY.DB_CFG (
DATE_KEY INT,
DB_NAME VARCHAR(20),
NAME VARCHAR(30),
VALUE VARCHAR(50),
VALUE_FLAGS VARCHAR(10)
)IN USERSPACE1 INDEX IN IXSPACE;
-- Weekly tablespace size snapshot (page counts from SNAPTBSP/SNAPTBSP_PART);
-- keyed by DATE_KEY + DB_NAME + ID.
DROP TABLE WEEKLY.TBSP_SIZE;
CREATE TABLE WEEKLY.TBSP_SIZE(
DATE_KEY INT,
DB_NAME VARCHAR(20),
ID SMALLINT,
NAME VARCHAR(30),
TYPE VARCHAR(10),
-- fixed: the original "CONTENT VAR VARCHAR(10)" (stray VAR token) is a
-- syntax error that would abort the CREATE TABLE
CONTENT VARCHAR(10),
PAGE_SIZE INT,
TOTAL INT,
USABLE INT,
USED INT,
FREE INT
)IN USERSPACE1 INDEX IN IXSPACE;
-- Weekly table size snapshot (NPAGES from SYSCAT.TABLES);
-- keyed by DATE_KEY + DB_NAME + TBSPACEID + TABID.
DROP TABLE WEEKLY.TB_SIZE;
CREATE TABLE WEEKLY.TB_SIZE(
DATE_KEY INT,
DB_NAME VARCHAR(20),
TBSPACEID SMALLINT,
TBSPACE VARCHAR(30),
TABID SMALLINT,
TABNAME VARCHAR(50),
NPAGES INT
)IN USERSPACE1 INDEX IN IXSPACE;
-- Weekly buffer-pool read counters and computed hit ratio (percent),
-- loaded from the *_BP nicknames (SYSIBMADM.BP_HITRATIO).
DROP TABLE WEEKLY.BP_SIZE;
CREATE TABLE WEEKLY.BP_SIZE(
DATE_KEY INT,
DB_NAME VARCHAR(20),
BP_NAME VARCHAR(20),
POOL_DATA_L_READS BIGINT,
POOL_DATA_P_READS BIGINT,
POOL_INDEX_L_READS BIGINT,
POOL_INDEX_P_READS BIGINT,
HITRATIO decimal(25,11)
)IN USERSPACE1 INDEX IN IXSPACE;
-- Weekly lock / deadlock / sort counters from the *_SNAPDB nicknames.
DROP TABLE WEEKLY.LOCKS;
CREATE TABLE WEEKLY.LOCKS(
DATE_KEY INT,
DB_NAME VARCHAR(20),
DEADLOCKS INT,
LOCK_WAITS BIGINT,
LOCK_WAIT_TIME BIGINT,
LOCK_TIMEOUTS INT,
TOTAL_SORTS BIGINT,
SORT_OVERFLOWS INT
)IN USERSPACE1 INDEX IN IXSPACE;
-- Table section of the REORGCHK report, imported from reorgchk_tab.del.
-- All stats columns are VARCHAR because they come straight from the textual
-- REORGCHK output (values such as '-' are possible).
DROP TABLE WEEKLY.REORGCHK_TAB;
CREATE TABLE WEEKLY.REORGCHK_TAB(
DATE_KEY INT,
DB_NAME VARCHAR(20),
TABLE_SCHEMA VARCHAR(30),
TABNAME VARCHAR(50),
CARD VARCHAR(20),
OV VARCHAR(20),
NP VARCHAR(20),
FP VARCHAR(20),
ACTBLK VARCHAR(20),
TSIZE VARCHAR(20),
F1 VARCHAR(20),
F2 VARCHAR(20),
F3 VARCHAR(20),
REORG VARCHAR(10)
)IN USERSPACE1 INDEX IN IXSPACE;
-- Index section of the REORGCHK report, imported from reorgchk_ind.del;
-- same textual-column convention as REORGCHK_TAB.
DROP TABLE WEEKLY.REORGCHK_IND;
CREATE TABLE WEEKLY.REORGCHK_IND(
DATE_KEY INT,
DB_NAME VARCHAR(20),
TABLE_SCHEMA VARCHAR(30),
TABNAME VARCHAR(50),
INDEX_SCHEMA VARCHAR(30),
INDNAME VARCHAR(50),
INDCARD VARCHAR(20),
LEAF VARCHAR(20),
ELEAF VARCHAR(20),
LVLS VARCHAR(20),
NDEL VARCHAR(20),
KEYS VARCHAR(20),
LEAF_RECSIZE VARCHAR(20),
NLEAF_RECSIZE VARCHAR(20),
LEAF_PAGE_OVERHEAD VARCHAR(20),
NLEAF_PAGE_OVERHEAD VARCHAR(20),
PCT_PAGES_SAVED VARCHAR(20),
F4 VARCHAR(20),
F5 VARCHAR(20),
F6 VARCHAR(20),
F7 VARCHAR(20),
F8 VARCHAR(20),
REORG VARCHAR(10)
)IN USERSPACE1 INDEX IN IXSPACE;
*************************************************init.sql*******************************************************************************
--一共8张表,每个表都是由DATE_KEY来定位日期,格式20120416,用DB_NAME来定位数据库,查询时可以通过这两个字段来进行定位
--以下列出大体分析方法
1.如果要对比两次日期的DBM里的参数值是否有变化
-- Self-join WEEKLY.DBM_CFG on two collection dates and show parameters whose
-- values differ. The two DATE_KEYs must be different dates — the original
-- compared 20120411 with itself, which can never report a change.
SELECT * FROM (
SELECT A.DATE_KEY,A.DB_NAME,A.NAME,A.VALUE A_VAL,B.VALUE B_VAL FROM WEEKLY.DBM_CFG A ,WEEKLY.DBM_CFG B WHERE A.DB_NAME=B.DB_NAME AND A.NAME=B.NAME AND A.DATE_KEY=20120411 AND B.DATE_KEY=20120418)
WHERE A_VAL<>B_VAL
2.如果要对比两次日期的DB里的参数值是否有变化
-- Same comparison for the database configuration. As above, the two
-- DATE_KEYs must differ (original compared 20120411 against itself).
SELECT * FROM (
SELECT A.DATE_KEY,A.DB_NAME,A.NAME,A.VALUE A_VAL,B.VALUE B_VAL FROM WEEKLY.DB_CFG A ,WEEKLY.DB_CFG B WHERE A.DB_NAME=B.DB_NAME AND A.NAME=B.NAME AND A.DATE_KEY=20120411 AND B.DATE_KEY=20120418)
WHERE A_VAL<>B_VAL
3.要对比表空间使用的增长情况,根据表结构,以及记录的增量方式,如果需要很好的展现出结果,我使用了递归来做,这里的比较系数是一周比较一次,如果想改大环比周期,修改以下的A.NUM=B.NUM+1为A.NUM=B.NUM+4(即1个月,4个星期)
USED记录目前的使用页数,CUR记录每次递归的过程中当前表空间使用的页数,B.CUR-A.USED就是这一天的页数减去上个周期的页数
USED是当前的页数,INCREASED里面第一个是上次增长的,第二个是上上次增长的,以此类推……
-- Recursive query: for each tablespace, show the current USED pages plus a
-- comma-separated list of the week-over-week deltas (newest first).
-- A numbers the snapshots per tablespace, newest = 1; B walks back through
-- them, appending B.CUR - A.USED (this week minus previous week) each step.
with A(DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,NUM) AS
(SELECT DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,ROW_NUMBER() OVER(PARTITION BY DB_NAME,ID ORDER BY DATE_KEY DESC) NUM FROM WEEKLY.TBSP_SIZE
)
,B(DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,CUR,INCREASED,NUM) AS
(SELECT DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,USED,CAST ('' AS VARCHAR(1000)) INCREASED,NUM FROM A WHERE NUM=1
UNION ALL
SELECT B.DATE_KEY,B.DB_NAME,B.ID,B.NAME,B.TYPE,B.CONTENT,B.PAGE_SIZE,B.TOTAL,B.USABLE,B.FREE,B.USED,A.USED,B.INCREASED||','||(B.CUR-A.USED) INCREASED,B.NUM+1 FROM A,B WHERE
A.DB_NAME=B.DB_NAME AND A.ID=B.ID AND A.NUM=B.NUM+1 AND B.NUM<3--recursion depth: only the last 3 weeks are examined; on a growing table
--deeper recursion gets slow, and only recent weeks matter here
)
SELECT DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,INCREASED FROM B WHERE NUM=(SELECT MAX(NUM) FROM B)
上面的是记录每次增长的量,这个是记录每次的量,HISTORY里面的值是从现在到以前每周的值
-- Same recursion as the delta query above, but HISTORY accumulates the
-- absolute USED page counts of each week (newest first) instead of deltas.
with A(DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,NUM) AS
(SELECT DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,ROW_NUMBER() OVER(PARTITION BY DB_NAME,ID ORDER BY DATE_KEY DESC) NUM FROM WEEKLY.TBSP_SIZE
)
,B(DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,HISTORY,NUM) AS
(SELECT DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,CAST (USED AS VARCHAR(1000)) HISTORY,NUM FROM A WHERE NUM=1
UNION ALL
-- alias fixed: the original wrote "HISTOR" (harmless only because the CTE
-- column list wins, but misleading to a reader)
SELECT B.DATE_KEY,B.DB_NAME,B.ID,B.NAME,B.TYPE,B.CONTENT,B.PAGE_SIZE,B.TOTAL,B.USABLE,B.FREE,B.USED,B.HISTORY||','||A.USED HISTORY,B.NUM+1 FROM A,B WHERE
A.DB_NAME=B.DB_NAME AND A.ID=B.ID AND A.NUM=B.NUM+1 AND B.NUM<3
)
SELECT DATE_KEY,DB_NAME,ID,NAME,TYPE,CONTENT,PAGE_SIZE,TOTAL,USABLE,FREE,USED,HISTORY FROM B WHERE NUM=(SELECT MAX(NUM) FROM B)
4.如3.这是对表增长进行查看
-- Same recursive delta report as the tablespace query, applied to per-table
-- growth (NPAGES from WEEKLY.TB_SIZE); rows with NPAGES=-1 (no stats) are
-- excluded up front.
with A(DATE_KEY,DB_NAME,TBSPACEID,TABID,TABNAME,NPAGES,NUM) AS
(SELECT DATE_KEY,DB_NAME,TBSPACEID,TABID,TABNAME,NPAGES,ROW_NUMBER() OVER(PARTITION BY DB_NAME,TBSPACEID,TABID ORDER BY DATE_KEY DESC) NUM FROM WEEKLY.TB_SIZE WHERE NPAGES<>-1
)
,B(DATE_KEY,DB_NAME,TBSPACEID,TABID,TABNAME,USED,CUR,INCREASED,NUM) AS
(SELECT DATE_KEY,DB_NAME,TBSPACEID,TABID,TABNAME,NPAGES,NPAGES,CAST ('' AS VARCHAR(1000)) INCREASED,NUM FROM A WHERE NUM=1
UNION ALL
SELECT B.DATE_KEY,B.DB_NAME,B.TBSPACEID,B.TABID,B.TABNAME,B.USED,A.NPAGES,B.INCREASED||','||(B.CUR-A.NPAGES) INCREASED,B.NUM+1 FROM A,B WHERE
A.DB_NAME=B.DB_NAME AND A.TBSPACEID=B.TBSPACEID AND A.TABID=B.TABID AND A.NUM=B.NUM+1 AND B.NUM<3
)
SELECT DATE_KEY,DB_NAME,TBSPACEID,TABID,TABNAME,USED,INCREASED FROM B WHERE NUM=(SELECT MAX(NUM) FROM B)
5.缓冲池。例如查看命中率小于70%的数据
-- Buffer pools with a hit ratio below 70%; ratio 0 is excluded because the
-- INSERT in check_weekly.sh stores 0 when the pool has had no reads at all.
select *
from "WEEKLY"."BP_SIZE" WHERE HITRATIO<70 AND HITRATIO<>0;
6.通过REORGCHK的结果,分为表和索引两个表(以下只是初步实例,具体要做的时候请仔细测试生成的结果)
表的REORG
-- Generate inplace REORG commands for the tables REORGCHK flagged with '*'.
select 'db2 reorg table '||TABLE_SCHEMA||'.'||TABNAME||' inplace allow write access start'
from "WEEKLY"."REORGCHK_TAB" where DATE_KEY=20120417 AND DB_NAME='UBTDB' AND REORG LIKE '%*%'
表的RUNSTATS
-- Generate RUNSTATS commands for the flagged tables.
SELECT 'db2 runstats on table '||TABLE_SCHEMA||'.'||TABNAME||' with distribution and detailed indexes all allow write access '
from "WEEKLY"."REORGCHK_TAB" where DATE_KEY=20120417 AND DB_NAME='UBTDB' AND REORG LIKE '%*%'
索引的REORG
-- Generate index REORG commands for the indexes REORGCHK flagged with '*'.
select 'db2 reorg index '||INDEX_SCHEMA||'.'||INDNAME||' for table '||TABLE_SCHEMA||'.'||TABNAME
from "WEEKLY"."REORGCHK_IND" where DATE_KEY=20120417 AND DB_NAME='UBTDB' AND REORG LIKE '%*%'
索引的RUNSTATS
-- Generate index RUNSTATS commands for the flagged indexes.
-- RUNSTATS runs ON the TABLE and names the index after FOR ... INDEX; the
-- original swapped table and index (it ran "on table" INDEX_SCHEMA.INDNAME)
-- and was missing the space before 'allow'.
select 'db2 runstats on table '||TABLE_SCHEMA||'.'||TABNAME||' for sampled detailed index '||INDEX_SCHEMA||'.'||INDNAME||' allow write access'
from "WEEKLY"."REORGCHK_IND" where DATE_KEY=20120417 AND DB_NAME='UBTDB' AND REORG LIKE '%*%'
7.死锁和锁等待的增长
-- Deadlock / lock-wait counters per collection date for one database;
-- compare DATE_KEY rows to see the weekly trend.
select *
from "WEEKLY"."LOCKS" where DB_NAME='UBTDB';
基于这些表,应该可以有更多的维度分析,这里只是做了几个比较常规的分析,大家可以集思广益
添加新评论39 条评论
2013-12-09 12:44
2013-04-09 15:32
2013-01-24 16:37
2012-10-19 11:34
2012-07-12 22:12
2012-07-03 08:27
2012-06-27 23:33
2012-05-28 22:00
2012-05-28 08:55
2012-05-27 12:00
一般awk command 不支持字符串作为分隔符,gawk支持。所以命令改为 "db2 reorgchk current statistics |sed 's/Table:/@/' |awk -f reorgchk_v97.awk",相应的RS改为 RS="@"。
2. 可能用DB2提供的Stored Procedure "reorgchk_TB_stats" and "reorgchk_IX_STATS 加一个grep command更简单..
2012-05-23 16:31
2012-05-22 14:07
2012-05-17 08:48
Table: OSCDM.CDCOMPLTARGET
2012-05-17 00:45
Table: OSCDM.CDCOMPLTARGETTP
18 0 1 1 - 1260 0 - 100 ---
Table: OSCDM.CDCOMPOPTP
3 0 1 1 - 102 0 - 100 ---
第二个field应该是18 和3。
2012-05-16 10:55
db2 reorgchk current statistics |awk 'BEGIN{RS="Table:"} {print $2}' |more
statistics:
Pages)
F1
DB2HUBI.AC
0
DB2HUBI.ADEFUSR
你这是什么意思呢,你按照我的脚本应该可以的,我刚测过。调用方法
db2 reorgchk current statistics|awk -f reorgchk_97.awk
2012-05-16 10:18
db2 reorgchk current statistics |awk 'BEGIN{RS="Table:"} {print $2}' |more
statistics:
Pages)
F1
DB2HUBI.AC
0
DB2HUBI.ADEFUSR
DB2HUBI.ADVISE_INDEX
2012-05-16 09:34
2012-05-07 10:27
2012-05-02 13:16
2012-04-28 22:14
2012-04-25 14:48
2012-04-25 11:29
2012-04-24 09:27
2012-04-21 22:14
2012-04-20 11:33
2012-04-19 22:43
2012-04-19 17:52
2012-04-19 14:05
最近想做 "联邦",由于工作也忙也静不下心来专心做,所以一直是失败。对了为什么要叫 "联邦",这个名词好奇怪哦!
2012-04-19 10:58
2012-04-19 09:45
2012-04-18 20:15
2012-04-18 15:48
最近想做 "联邦",由于工作也忙也静不下心来专心做,所以一直是失败。对了为什么要叫 "联邦",这个名词好奇怪哦!
2012-04-18 15:19
2012-04-18 15:16
2012-04-18 09:42
2012-04-18 07:36
2012-04-17 11:47
2012-04-16 22:48
2012-04-15 12:55