利用python进入数据分析之数据聚合与数据分组运算
from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)
In [3]:
pd.options.display.notebook_repr_html = False
In [4]:
%matplotlib inline
In [5]:
df = DataFrame({'key1' : ['a', 'a', 'b', 'b', 'a'],
'key2' : ['one', 'two', 'one', 'two', 'one'],
'data1' : np.random.randn(5),
'data2' : np.random.randn(5)})
df
Out[5]:
In [6]:
grouped = df['data1'].groupby(df['key1'])
grouped
Out[6]:
In [7]:
grouped.mean()
Out[7]:
In [8]:
means = df['data1'].groupby([df['key1'], df['key2']]).mean() # 计算平均值
means
Out[8]:
In [9]:
means.unstack() # 数据转置
Out[9]:
In [12]:
states = np.array(['Ohio', 'California', 'California', 'Ohio', 'Ohio'])
years = np.array([2005, 2005, 2006, 2005, 2006])
df['data1'].groupby([states, years]).mean()
Out[12]:
In [13]:
df.groupby('key1').mean() # 不是数值的key2,将会自动过滤掉
Out[13]:
In [15]:
df.groupby(['key1', 'key2']).mean()
Out[15]:
In [16]:
df.groupby(['key1', 'key2']).size() # 返回分组的大小
Out[16]:
In [17]:
df
Out[17]:
In [18]:
for name, group in df.groupby('key1'):
print(name)
print(group)
In [19]:
for (k1, k2), group in df.groupby(['key1', 'key2']):
print((k1, k2))
print(group)
In [21]:
pieces = dict(list(df.groupby('key1')))
pieces
Out[21]:
In [22]:
pieces['b']
Out[22]:
In [23]:
df.dtypes
Out[23]:
In [24]:
grouped = df.groupby(df.dtypes, axis=1)
dict(list(grouped))
Out[24]:
In [25]:
df
Out[25]:
In [29]:
df.groupby(['key1', 'key2'])[['data2']].mean()
Out[29]:
In [30]:
s_grouped = df.groupby(['key1', 'key2'])['data2']
s_grouped
Out[30]:
In [31]:
s_grouped.mean()
Out[31]:
In [33]:
# 5x5 frame of random normals, indexed by name, with a couple of NA
# values injected so the groupby examples below have missing data.
people = DataFrame(np.random.randn(5, 5),
                   columns=['a', 'b', 'c', 'd', 'e'],
                   index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
# .ix was removed from pandas; use purely positional .iloc instead to
# set row 2 (Wes), columns 'b' and 'c' (positions 1 and 2) to NaN.
people.iloc[2:3, [1, 2]] = np.nan
people
Out[33]:
In [34]:
mapping = {'a': 'red', 'b': 'red', 'c': 'blue',
'd': 'blue', 'e': 'red', 'f' : 'orange'}
In [37]:
by_column = people.groupby(mapping, axis=1)
by_column.sum()
Out[37]:
In [38]:
map_series = Series(mapping)
map_series
Out[38]:
In [39]:
people.groupby(map_series, axis=1).count()
Out[39]:
In [40]:
people.groupby(len).sum() # 通过长度进行分组
Out[40]:
In [41]:
key_list = ['one', 'one', 'one', 'two', 'two']
people.groupby([len, key_list]).min()
Out[41]:
In [42]:
columns = pd.MultiIndex.from_arrays([['US', 'US', 'US', 'JP', 'JP'],
[1, 3, 5, 1, 3]], names=['cty', 'tenor'])
hier_df = DataFrame(np.random.randn(4, 5), columns=columns)
hier_df
Out[42]:
In [43]:
hier_df.groupby(level='cty', axis=1).count()
Out[43]:
In [44]:
df
Out[44]:
In [45]:
grouped = df.groupby('key1')
grouped['data1'].quantile(0.9) # quantile计算样本分位数
Out[45]:
In [46]:
def peak_to_peak(arr):
return arr.max() - arr.min()
grouped.agg(peak_to_peak) # 使用自己的聚合函数
Out[46]:
In [47]:
grouped.describe()
Out[47]:
In [50]:
tips = pd.read_csv('../tips.csv')
# Add tip percentage of total bill
tips['tip_pct'] = tips['tip'] / tips['total_bill'] # 增加tip_pct这一列
tips[:6]
Out[50]:
In [51]:
grouped = tips.groupby(['sex', 'smoker'])
In [52]:
grouped_pct = grouped['tip_pct']
grouped_pct.agg('mean')
Out[52]:
In [53]:
grouped_pct.agg(['mean', 'std', peak_to_peak]) # 可以传入函数
Out[53]:
In [54]:
grouped_pct.agg([('foo', 'mean'), ('bar', np.std)])
Out[54]:
In [56]:
functions = ['count', 'mean', 'max']
# Select several columns with a LIST; the tuple form
# grouped['tip_pct', 'total_bill'] was removed in pandas 1.0.
result = grouped[['tip_pct', 'total_bill']].agg(functions)
result
Out[56]:
In [57]:
result['tip_pct']
Out[57]:
In [58]:
# (name, function) tuples let us rename the aggregated columns.
ftuples = [('Durchschnitt', 'mean'), ('Abweichung', np.var)]
# List selection replaces the removed tuple indexing grouped['a', 'b'].
grouped[['tip_pct', 'total_bill']].agg(ftuples)
Out[58]:
In [59]:
grouped.agg({'tip' : np.max, 'size' : 'sum'})
Out[59]:
In [60]:
grouped.agg({'tip_pct' : ['min', 'max', 'mean', 'std'],
'size' : 'sum'})
Out[60]:
In [61]:
tips.groupby(['sex', 'smoker'], as_index=False).mean()
Out[61]:
In [62]:
df
Out[62]:
In [63]:
k1_means = df.groupby('key1').mean().add_prefix('mean_')
k1_means
Out[63]:
In [64]:
pd.merge(df, k1_means, left_on='key1', right_index=True)
Out[64]:
In [65]:
key = ['one', 'two', 'one', 'two', 'one']
people.groupby(key).mean()
Out[65]:
In [66]:
people.groupby(key).transform(np.mean)
Out[66]:
In [67]:
def demean(arr):
    """Center *arr* around zero by subtracting its mean."""
    center = arr.mean()
    return arr - center
demeaned = people.groupby(key).transform(demean)
demeaned
Out[67]:
In [68]:
demeaned.groupby(key).mean()
Out[68]:
In [69]:
def top(df, n=5, column='tip_pct'):
    """Return the *n* rows of *df* with the largest values in *column*.

    Rows come back in ascending order of *column*.
    """
    # sort_index(by=...) was removed from pandas; sort_values is the
    # modern equivalent.
    return df.sort_values(by=column)[-n:]
top(tips, n=6) # 找出最高的6个tip_pct
Out[69]:
In [71]:
tips.groupby('smoker').apply(top) # 对抽烟进行分组,找出抽的和不抽的
Out[71]:
In [72]:
tips.groupby(['smoker', 'day']).apply(top, n=1, column='total_bill') # 可以传入top的参数
Out[72]:
In [73]:
result = tips.groupby('smoker')['tip_pct'].describe()
result
Out[73]:
In [74]:
result.unstack('smoker')
Out[74]:
In [75]:
tips.groupby('smoker', group_keys=False).apply(top)
Out[75]:
In [76]:
frame = DataFrame({'data1': np.random.randn(1000),
'data2': np.random.randn(1000)})
factor = pd.cut(frame.data1, 4)
factor[:10]
Out[76]:
In [77]:
def get_stats(group):
    """Summarize one group as a dict of min/max/count/mean.

    Returning a dict makes groupby(...).apply produce labelled output.
    """
    return {
        'min': group.min(),
        'max': group.max(),
        'count': group.count(),
        'mean': group.mean(),
    }
grouped = frame.data2.groupby(factor)
grouped.apply(get_stats).unstack()
#ADAPT the output is not sorted in the book while this is the case now (swap first two lines)
Out[77]:
In [78]:
# Return quantile numbers
grouping = pd.qcut(frame.data1, 10, labels=False)
grouped = frame.data2.groupby(grouping)
grouped.apply(get_stats).unstack()
Out[78]:
In [79]:
s = Series(np.random.randn(6))
s[::2] = np.nan
s
Out[79]:
In [80]:
s.fillna(s.mean()) # 用平均值填充
Out[80]:
In [81]:
states = ['Ohio', 'New York', 'Vermont', 'Florida',
'Oregon', 'Nevada', 'California', 'Idaho']
group_key = ['East'] * 4 + ['West'] * 4
data = Series(np.random.randn(8), index=states)
data[['Vermont', 'Nevada', 'Idaho']] = np.nan
data
Out[81]:
In [82]:
data.groupby(group_key).mean()
Out[82]:
In [83]:
fill_mean = lambda g: g.fillna(g.mean()) # 填充分组平均值
data.groupby(group_key).apply(fill_mean)
Out[83]:
In [84]:
fill_values = {'East': 0.5, 'West': -1}
fill_func = lambda g: g.fillna(fill_values[g.name])
data.groupby(group_key).apply(fill_func)
Out[84]:
In [85]:
# Build a 52-card deck: Hearts, Spades, Clubs, Diamonds.
suits = ['H', 'S', 'C', 'D']
# Card values: ace=1, 2-10 face value, J/K/Q all count 10.
# range() no longer concatenates with lists in Python 3, so wrap in list().
card_val = (list(range(1, 11)) + [10] * 3) * 4
base_names = ['A'] + list(range(2, 11)) + ['J', 'K', 'Q']
cards = []
for suit in suits:
    cards.extend(str(num) + suit for num in base_names)
deck = Series(card_val, index=cards)
In [86]:
deck[:13]
Out[86]:
In [87]:
def draw(deck, n=5):
return deck.take(np.random.permutation(len(deck))[:n])
draw(deck) # 随机抽取5张
Out[87]:
In [88]:
get_suit = lambda card: card[-1] # last letter is suit
deck.groupby(get_suit).apply(draw, n=2) # 每种花色中抽取两张牌
Out[88]:
In [89]:
# alternatively(其他方法)
deck.groupby(get_suit, group_keys=False).apply(draw, n=2)
Out[89]:
In [90]:
df = DataFrame({'category': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'],
'data': np.random.randn(8),
'weights': np.random.rand(8)})
df
Out[90]:
In [91]:
grouped = df.groupby('category')
get_wavg = lambda g: np.average(g['data'], weights=g['weights'])
grouped.apply(get_wavg) # 求加权平均数
Out[91]:
In [92]:
close_px = pd.read_csv('../stock_px.csv', parse_dates=True, index_col=0)
close_px.info()
In [93]:
close_px[-4:]
Out[93]:
In [94]:
rets = close_px.pct_change().dropna()
spx_corr = lambda x: x.corrwith(x['SPX'])
by_year = rets.groupby(lambda x: x.year)
by_year.apply(spx_corr)
Out[94]:
In [95]:
# Annual correlation of Apple with Microsoft
by_year.apply(lambda g: g['AAPL'].corr(g['MSFT']))
Out[95]:
In [96]:
import statsmodels.api as sm
def regress(data, yvar, xvars):
    """OLS-regress data[yvar] on data[xvars] plus an intercept.

    Returns the fitted coefficients (result.params) as a Series.
    """
    Y = data[yvar]
    # Copy so adding the intercept column does not mutate the caller's
    # frame (the original wrote into a view of *data*).
    X = data[xvars].copy()
    X['intercept'] = 1.
    result = sm.OLS(Y, X).fit()
    return result.params
In [97]:
by_year.apply(regress, 'AAPL', ['SPX'])#计算线性回归
Out[97]:
In [98]:
tips.pivot_table(index=['sex', 'smoker'])
Out[98]:
In [99]:
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
columns='smoker')
Out[99]:
In [100]:
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
columns='smoker', margins=True)
Out[100]:
In [101]:
tips.pivot_table('tip_pct', index=['sex', 'smoker'], columns='day',
aggfunc=len, margins=True)
Out[101]:
In [102]:
tips.pivot_table('size', index=['time', 'sex', 'smoker'],
columns='day', aggfunc='sum', fill_value=0)
Out[102]:
In [103]:
from io import StringIO  # Python 3 home of StringIO (was the StringIO module in Python 2)
data = """\
Sample Gender Handedness
1 Female Right-handed
2 Male Left-handed
3 Female Right-handed
4 Male Right-handed
5 Male Left-handed
6 Male Right-handed
7 Female Right-handed
8 Female Left-handed
9 Male Right-handed
10 Female Right-handed"""
# Parse the whitespace-separated table; raw string avoids the invalid
# '\s' escape warning in Python 3.
data = pd.read_table(StringIO(data), sep=r'\s+')
In [104]:
data
Out[104]:
In [105]:
pd.crosstab(data.Gender, data.Handedness, margins=True)
Out[105]:
In [106]:
pd.crosstab([tips.time, tips.day], tips.smoker, margins=True)
Out[106]:
In [108]:
fec = pd.read_csv('../P00000001-ALL.csv')
In [109]:
fec.info()
In [110]:
fec.iloc[123456]  # .ix was removed from pandas; inspect one row by position
Out[110]:
In [111]:
unique_cands = fec.cand_nm.unique() # 获得不同候选人名单
unique_cands
Out[111]:
In [112]:
unique_cands[2]
Out[112]:
In [113]:
parties = {'Bachmann, Michelle': 'Republican',
'Cain, Herman': 'Republican',
'Gingrich, Newt': 'Republican',
'Huntsman, Jon': 'Republican',
'Johnson, Gary Earl': 'Republican',
'McCotter, Thaddeus G': 'Republican',
'Obama, Barack': 'Democrat',
'Paul, Ron': 'Republican',
'Pawlenty, Timothy': 'Republican',
'Perry, Rick': 'Republican',
"Roemer, Charles E. 'Buddy' III": 'Republican',
'Romney, Mitt': 'Republican',
'Santorum, Rick': 'Republican'}
In [114]:
fec.cand_nm[123456:123461]
Out[114]:
In [115]:
fec.cand_nm[123456:123461].map(parties)
Out[115]:
In [116]:
# Add it as a column
fec['party'] = fec.cand_nm.map(parties)
In [117]:
fec['party'].value_counts()
Out[117]:
In [118]:
(fec.contb_receipt_amt > 0).value_counts()
Out[118]:
In [119]:
fec = fec[fec.contb_receipt_amt > 0]
In [120]:
fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', 'Romney, Mitt'])]
In [122]:
fec.contbr_occupation.value_counts()[:10]
Out[122]:
In [123]:
occ_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED',
'C.E.O.': 'CEO'
}
# If no mapping provided, return x
f = lambda x: occ_mapping.get(x, x)
fec.contbr_occupation = fec.contbr_occupation.map(f)
In [124]:
emp_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'SELF' : 'SELF-EMPLOYED',
'SELF EMPLOYED' : 'SELF-EMPLOYED',
}
# If no mapping provided, return x
f = lambda x: emp_mapping.get(x, x)
fec.contbr_employer = fec.contbr_employer.map(f)
In [125]:
by_occupation = fec.pivot_table('contb_receipt_amt',
index='contbr_occupation',
columns='party', aggfunc='sum') #聚合操作
In [126]:
over_2mm = by_occupation[by_occupation.sum(1) > 2000000] #过滤掉出资额不足200万美元
over_2mm
Out[126]:
In [127]:
over_2mm.plot(kind='barh')
Out[127]:
In [131]:
bins = np.array([0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000])
labels = pd.cut(fec_mrbo.contb_receipt_amt, bins)
labels
Out[131]:
In [132]:
grouped = fec_mrbo.groupby(['cand_nm', labels])
grouped.size().unstack(0)
Out[132]:
In [133]:
bucket_sums = grouped.contb_receipt_amt.sum().unstack(0)
bucket_sums
Out[133]:
In [134]:
normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0)
normed_sums
Out[134]:
In [135]:
normed_sums[:-2].plot(kind='barh', stacked=True)
Out[135]:
In [136]:
grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st'])
totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0)
totals = totals[totals.sum(1) > 100000]
totals[:10]
Out[136]:
In [137]:
percent = totals.div(totals.sum(1), axis=0)
percent[:10]
Out[137]: