r90442 MediaWiki - Code Review archive

Repository: MediaWiki
Revision: < r90441 | r90442 | r90443 >
Date: 08:10, 20 June 2011
Author: rfaulk
Status: deferred
Tags:
Comment:
Added logic for better handling of different types of tests
Added documentation
Modified paths:
  • /trunk/fundraiser-statistics/fundraiser-scripts/web_reporting/tests/views.py (modified) (history)

Diff

Index: trunk/fundraiser-statistics/fundraiser-scripts/web_reporting/tests/views.py
@@ -1,3 +1,28 @@
 2+"""
 3+ DJANGO VIEW DEFINITIONS:
 4+ ========================
 5+
 6+ Defines the views for the Log Miner Logging (LML) application. The LML is meant to provide functionality for observing log mining activity and to
 7+ copy and mine logs at the user's request.
 8+
 9+ Views:
 10+
 11+ index -- Shows a listing of generated tests by looking at the test table in the FR database via a DataLoader
 12+ test -- Generates the test results via a DataReporting object
 13+ add_comment -- Handles the comment form when users wish to add comments to a report
 14+
 15+ Helper:
 16+
 17+ auto_gen -- Does the actual work of generating the test report plots and results
 18+
 19+"""
 20+
 21+__author__ = "Ryan Faulkner"
 22+__revision__ = "$Rev$"
 23+__date__ = "June 20th, 2011"
 24+
 25+
 26+
227 from django.shortcuts import render_to_response, redirect
328 from django.http import Http404
429 from django.shortcuts import render_to_response, get_object_or_404
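The docstring above names three views (index, test, add_comment). For orientation only, a minimal Django 1.x URL configuration wiring them up might look like the sketch below; the regexes are assumptions, since the project's actual urls.py is not part of this revision.

# Hypothetical urls.py for the views documented above (Django 1.x style);
# not included in this diff -- regexes are illustrative only.
from django.conf.urls.defaults import patterns

urlpatterns = patterns('tests.views',
    (r'^$', 'index'),                                           # listing of generated tests
    (r'^test/$', 'test'),                                        # build a test report
    (r'^add_comment/(?P<utm_campaign>[^/]+)/$', 'add_comment'),  # attach a comment to a report
)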
@@ -17,6 +42,7 @@
1843 import Fundraiser_Tools.classes.DataReporting as DR
1944 import Fundraiser_Tools.classes.FundraiserDataHandler as FDH
2045 import Fundraiser_Tools.classes.TimestampProcessor as TP
 46+import Fundraiser_Tools.classes.QueryData as QD
2147 import Fundraiser_Tools.settings as projSet
2248 import operator
2349
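The new QueryData import (aliased QD) is used further down in this revision to expand short metric keys into descriptive names. As an illustration of the call being relied on, get_metric_full_name could be a plain dictionary lookup; the sketch below is hypothetical, the real implementation lives in Fundraiser_Tools.classes.QueryData and is not shown in this diff, and the display strings are invented.

# Hypothetical sketch of QueryData.get_metric_full_name -- illustrative only.
_METRIC_FULL_NAMES = {
    'don_per_imp': 'Donations per Impression',
    'don_per_view': 'Donations per View',
    'amt50_per_imp': 'Amount-50 per Impression',
    'amt50_per_view': 'Amount-50 per View',
}

def get_metric_full_name(metric_name):
    # Fall back to the short key when no descriptive name is registered.
    return _METRIC_FULL_NAMES.get(metric_name, metric_name)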
@@ -51,6 +77,7 @@
5278 utm_campaign_var = request.POST['utm_campaign']
5379 start_time_var = request.POST['start_time']
5480 end_time_var = request.POST['end_time']
 81+ test_type_override = request.POST['test_type_override']
5582
5683 try:
5784 test_type_var = request.POST['test_type']
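A note on the new test_type_override field: unlike test_type just above, it is read with direct indexing outside the try block, so a request whose form omits the field raises MultiValueDictKeyError before the fallback logic is reached. A defensive variant (a sketch, not what this revision does):

# request.POST is a QueryDict; .get() returns a default instead of raising
# MultiValueDictKeyError when the override field is absent from the form.
test_type_override = request.POST.get('test_type_override', '')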
@@ -59,56 +86,103 @@
6087
6188 except KeyError as e:
6289
63 - os.chdir(projSet.__project_home__ + '/Fundraiser_Tools/classes')
64 - test_type_var, labels = FDH.get_test_type(utm_campaign_var, start_time_var, end_time_var)
65 - os.chdir(projSet.__project_home__ + '/Fundraiser_Tools/web_reporting')
 90+ os.chdir(projSet.__project_home__ + 'classes')
 91+ test_type_var, labels = FDH.get_test_type(utm_campaign_var, start_time_var, end_time_var, DL.CampaignReportingLoader('')) # submit an empty query type
 92+ os.chdir(projSet.__project_home__ + 'web_reporting')
6693
6794 labels = labels.__str__()
6895
6996 labels = labels[1:-1].split(',')
7097 label_dict = dict()
71 -
 98+
7299 for i in range(len(labels)):
73100 label = labels[i].split('\'')[1]
74101 label = label.strip()
 102+ pieces = label.split(' ')
 103+ label = pieces[0]
 104+ for j in range(len(pieces) - 1):
 105+ label = label + '_' + pieces[j+1]
75106 label_dict[label] = label
76107
77 - except KeyError as e:
 108+ except Exception as inst:
 109+
 110+ print type(inst) # the exception instance
 111+ print inst.args # arguments stored in .args
 112+ print inst # __str__ allows args to be printed directly
 113+
78114 """ flag an error here for the user """
79115 return HttpResponseRedirect(reverse('tests.views.index'))
80116 # pass
 117+
 118+ os.chdir(projSet.__project_home__ + 'classes')
 119+
 120+ crl = DL.CampaignReportingLoader('')
 121+ artifact_list = list()
 122+
 123+ """
 124+ TEST TYPE OVERRIDE HANDLING:
81125
82 - os.chdir(projSet.__project_home__ + '/Fundraiser_Tools/classes')
 126+ If the user wishes to specify the test type, incorporate that request into the logic.
 127+ """
 128+ if test_type_override == 'Banner':
 129+ test_type_var = FDH._TESTTYPE_BANNER_
 130+ crl._query_type_ = test_type_var
 131+ artifact_list = crl.run_query({'utm_campaign' : utm_campaign_var, 'start_time' : start_time_var, 'end_time' : end_time_var})
 132+ elif test_type_override == 'Landing Page':
 133+ test_type_var = FDH._TESTTYPE_LP_
 134+ crl._query_type_ = test_type_var
 135+ artifact_list = crl.run_query({'utm_campaign' : utm_campaign_var, 'start_time' : start_time_var, 'end_time' : end_time_var})
 136+ elif test_type_override == 'Banner and LP':
 137+ test_type_var = FDH._TESTTYPE_BANNER_
 138+ crl._query_type_ = test_type_var
 139+ artifact_list = crl.run_query({'utm_campaign' : utm_campaign_var, 'start_time' : start_time_var, 'end_time' : end_time_var})
83140
84 - """ Execute Report """
85141
 142+ """ convert the artifact list into a label dictionary for the template """
 143+ if len(artifact_list) > 0:
 144+ label_dict = dict()
 145+ for elem in artifact_list:
 146+ label_dict[elem] = elem
 147+
 148+ # os.chdir(projSet.__project_home__ + 'classes')
 149+
 150+
 151+ """ EXECUTE REPORT """
 152+
86153 # Build a test interval - use the entire test period
 154+ sample_interval = 2
87155 start_time_obj = TP.timestamp_to_obj(start_time_var, 1)
88156 end_time_obj = TP.timestamp_to_obj(end_time_var, 1)
89157 time_diff = end_time_obj - start_time_obj
90158 time_diff_min = time_diff.seconds / 60.0
91 - test_interval = int(math.floor(time_diff_min / 2)) # 2 is the interval
 159+ test_interval = int(math.floor(time_diff_min / sample_interval)) # length of the test period in sample_interval-minute steps
92160
93161
94162
95 - os.chdir(projSet.__project_home__ + '/Fundraiser_Tools/web_reporting')
 163+ os.chdir(projSet.__project_home__ + 'web_reporting')
96164
97165 metric_types = FDH.get_test_type_metrics(test_type_var)
 166+ metric_types_full = dict()
98167
 168+ """ Get the full (descriptive) version of the metric names """
 169+ for i in range(len(metric_types)):
 170+ metric_types_full[metric_types[i]] = QD.get_metric_full_name(metric_types[i])
 171+
 172+ """ Depending on the type of test specified, call the auto_gen function """
99173 if test_type_var == FDH._TESTTYPE_BANNER_:
100174
101 - winner_dpi, percent_win_dpi, conf_dpi, winner_api, percent_win_api, conf_api = auto_gen(test_name_var, start_time_var, end_time_var, utm_campaign_var, label_dict, 2, test_interval, test_type_var, metric_types)
 175+ winner_dpi, percent_win_dpi, conf_dpi, winner_api, percent_win_api, conf_api, html_table = auto_gen(test_name_var, start_time_var, end_time_var, utm_campaign_var, label_dict, sample_interval, test_interval, test_type_var, metric_types)
102176
103177 html = render_to_response('tests/results_' + FDH._TESTTYPE_BANNER_ + '.html', {'winner' : winner_dpi, 'percent_win_dpi' : '%.2f' % percent_win_dpi, 'percent_win_api' : '%.2f' % percent_win_api, 'conf_dpi' : conf_dpi, 'conf_api' : conf_api, 'utm_campaign' : utm_campaign_var, \
104 - 'metric_names' : metric_types}, context_instance=RequestContext(request))
 178+ 'metric_names_full' : metric_types_full, 'summary_table': html_table, 'sample_interval' : sample_interval}, context_instance=RequestContext(request))
105179 elif test_type_var == FDH._TESTTYPE_LP_:
106180
107 - winner_dpv, percent_win_dpv, conf_dpv, winner_apv, percent_win_apv, conf_apv = auto_gen(test_name_var, start_time_var, end_time_var, utm_campaign_var, label_dict, 2, test_interval, test_type_var, metric_types)
 181+ winner_dpv, percent_win_dpv, conf_dpv, winner_apv, percent_win_apv, conf_apv, html_table = auto_gen(test_name_var, start_time_var, end_time_var, utm_campaign_var, label_dict, sample_interval, test_interval, test_type_var, metric_types)
108182
109183 html = render_to_response('tests/results_' + FDH._TESTTYPE_LP_ + '.html', {'winner' : winner_dpv, 'percent_win_dpv' : '%.2f' % percent_win_dpv, 'percent_win_apv' : '%.2f' % percent_win_apv, 'conf_dpv' : conf_dpv, 'conf_apv' : conf_apv, 'utm_campaign' : utm_campaign_var, \
110 - 'metric_names' : metric_types}, context_instance=RequestContext(request))
 184+ 'metric_names_full' : metric_types_full, 'summary_table': html_table, 'sample_interval' : sample_interval}, context_instance=RequestContext(request))
111185
112 - """ Write to test table """
 186+ """ WRITE TO TEST TABLE """
113187
114188 ttl = DL.TestTableLoader()
115189
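The three override branches above differ only in which FDH constant they select before running the same campaign query. A table-driven sketch, using only names already present in this diff, folds them together:

# Sketch: map the override choices to FDH test-type constants and run the
# campaign query once, instead of repeating it in every branch.
_OVERRIDE_TO_TYPE = {
    'Banner': FDH._TESTTYPE_BANNER_,
    'Landing Page': FDH._TESTTYPE_LP_,
    'Banner and LP': FDH._TESTTYPE_BANNER_,
}

if test_type_override in _OVERRIDE_TO_TYPE:
    test_type_var = _OVERRIDE_TO_TYPE[test_type_override]
    crl._query_type_ = test_type_var
    artifact_list = crl.run_query({'utm_campaign': utm_campaign_var,
                                   'start_time': start_time_var,
                                   'end_time': end_time_var})

A related nit in the same hunk: time_diff.seconds ignores whole days (timedelta stores days separately), so a test window longer than 24 hours undercounts intervals; time_diff.days * 1440 + time_diff.seconds / 60.0 gives the full duration in minutes.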
@@ -123,6 +197,7 @@
124198 # for i in html_string_parts:
125199 # html_string = html_string + i
126200
 201+
127202 if ttl.record_exists(utm_campaign=utm_campaign_var):
128203 ttl.update_test_row(test_name=test_name_var,test_type=test_type_var,utm_campaign=utm_campaign_var,start_time=start_time_var,end_time=end_time_var,html_report=html_string)
129204 else:
@@ -133,10 +208,8 @@
134209
135210 """
136211
137 - Helper method for 'test' view which generates a report
 212+ Helper method for 'test' view which generates a report based on parameters
138213
139 - INPUT:
140 - RETURN:
141214
142215 """
143216 def auto_gen(test_name, start_time, end_time, campaign, labels, sample_interval, test_interval, test_type, metric_types):
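This revision drops the empty INPUT/RETURN stubs from auto_gen's docstring. One way to make the signature self-documenting instead is sketched below; the parameter descriptions are an editor's reading of the surrounding code, not the author's documentation.

def auto_gen(test_name, start_time, end_time, campaign, labels,
             sample_interval, test_interval, test_type, metric_types):
    """
    Helper for the 'test' view: builds the interval and confidence plots
    and returns the headline results.

    test_name       -- display name of the test
    start_time,
    end_time        -- timestamps bounding the test period
    campaign        -- utm_campaign identifier
    labels          -- dict of artifact labels to report on
    sample_interval -- sampling interval (minutes) used for the plots
    test_interval   -- test period length measured in sample_interval steps
    test_type       -- FDH._TESTTYPE_BANNER_ or FDH._TESTTYPE_LP_
    metric_types    -- list of metric keys to plot

    Returns [winner_dpi, percent_increase_dpi, confidence_dpi,
             winner_api, percent_increase_api, confidence_api, html_table].
    """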
@@ -145,103 +218,57 @@
146219
147220 os.chdir('/home/rfaulkner/trunk/projects/Fundraiser_Tools/classes')
148221
 222+ """ Labels will always be metric names in this case """
149223 use_labels_var = True
150224 if len(labels) == 0:
151225 use_labels_var = False
152226
153227 """ Build reporting objects """
154 - ir = DR.IntervalReporting(use_labels=use_labels_var,font_size=20,plot_type='step',file_path=projSet.__web_home__ + 'tests/static/images/')
155 - ir_cmpgn = DR.IntervalReporting(use_labels=False,font_size=20,plot_type='line',data_loader='campaign',file_path=projSet.__web_home__ + 'tests/static/images/')
 228+ if test_type == FDH._TESTTYPE_BANNER_:
 229+ ir = DR.IntervalReporting(use_labels=use_labels_var,font_size=20,plot_type='step',query_type='banner',file_path=projSet.__web_home__ + 'tests/static/images/')
 230+ elif test_type == FDH._TESTTYPE_LP_:
 231+ ir = DR.IntervalReporting(use_labels=use_labels_var,font_size=20,plot_type='step',query_type='LP',file_path=projSet.__web_home__ + 'tests/static/images/')
 232+
 233+ ir_cmpgn = DR.IntervalReporting(use_labels=False,font_size=20,plot_type='line',query_type='campaign',file_path=projSet.__web_home__ + 'campaigns/static/images/')
156234 cr = DR.ConfidenceReporting(use_labels=use_labels_var,font_size=20,plot_type='line',hyp_test='t_test',file_path=projSet.__web_home__ + 'tests/static/images/')
157235
158236 """ generate interval reporting plots """
159237
160238
161 - # print 'Generating interval plots ...\n'
 239+ """ !! MODIFY -- allow a list of metrics to be passed """
162240 for metric in metric_types:
163 - if test_type == FDH._TESTTYPE_BANNER_:
164 - ir.run(start_time, end_time, sample_interval, 'banner', metric, campaign, labels.keys())
165 - if test_type == FDH._TESTTYPE_LP_:
166 - ir.run(start_time, end_time, sample_interval, 'LP', metric, campaign, labels.keys())
167 -
 241+ #print [start_time, end_time, sample_interval, metric, campaign, labels.keys()]
 242+ ir.run(start_time, end_time, sample_interval, metric, campaign, labels.keys())
 243+
 244+ """ Report summary """
 245+ ir._write_html_table()
 246+ html_table = ir._table_html_
 247+
168248 # print 'Generating campaign plots...\n'
169 - ir_cmpgn.run(start_time, end_time, sample_interval, 'campaign', 'views', campaign, [])
170 - ir_cmpgn.run(start_time, end_time, sample_interval, 'campaign', 'donations', campaign, [])
 249+ ir_cmpgn.run(start_time, end_time, sample_interval, 'views', campaign, [])
 250+ ir_cmpgn.run(start_time, end_time, sample_interval, 'donations', campaign, [])
171251
172 - """ generate confidence reporting plots """
173 - # print 'Executing hypothesis testing ...\n'
174 - # cr.run('Fader VS Static','report_banner_confidence','don_per_imp','20101228JA075',{'Static banner':'20101227_JA061_US','Fading banner':'20101228_JAFader_US'},'20101229141000','20101229155000',2,10)
 252+ """ generate confidence reporting plots """
 253+ #!!! MODIFY -- Omit for now
 254+
175255 if test_type == FDH._TESTTYPE_BANNER_:
176256 winner_dpi, percent_increase_dpi, confidence_dpi = cr.run(test_name,'report_banner_confidence','don_per_imp',campaign, labels, start_time, end_time, sample_interval,test_interval)
177257 winner_api, percent_increase_api, confidence_api = cr.run(test_name,'report_banner_confidence','amt50_per_imp',campaign, labels, start_time, end_time, sample_interval,test_interval)
178258 elif test_type == FDH._TESTTYPE_LP_:
179259 winner_dpi, percent_increase_dpi, confidence_dpi = cr.run(test_name,'report_LP_confidence','don_per_view',campaign, labels, start_time, end_time, sample_interval,test_interval)
180260 winner_api, percent_increase_api, confidence_api = cr.run(test_name,'report_LP_confidence','amt50_per_view',campaign, labels, start_time, end_time, sample_interval,test_interval)
181 -
182 - """ compose HTML """
183261
184 - os.chdir('/home/rfaulkner/trunk/projects/Fundraiser_Tools/classes/tests/')
185 -
186 -
187 - #f = open('auto_report.html', 'w')
188 -
189 - html_script = ''
190 -
191 - html_script = html_script + '\n<html>\n<head>\n<title>Big Ass Reportin\'</title>'
192 -
193 - html_script = html_script + '</head>\n<body>\n<h1>Test Report</h1>\n<br>\n'
 262+ #winner_dpi, percent_increase_dpi, confidence_dpi = ['',0.0,'']
 263+ #winner_api, percent_increase_api, confidence_api = ['',0.0,'']
 264+ return [winner_dpi, percent_increase_dpi, confidence_dpi, winner_api, percent_increase_api, confidence_api, html_table]
194265
195 - html_script = html_script + '<h3><u>Interval Reporting</u></h3>\n'
196 -
197 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_banner_' + metric_types[0] + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
198 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_banner_' + metric_types[1] + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
199 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_banner_' + metric_types[2] + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
200 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_banner_' + metric_types[3] + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
201 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_campaign_views' + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
202 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_campaign_donations' + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
203 -
204 - html_script = html_script + '<h3><u>Confidence Reporting</u></h3>\n'
205 -
206 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_conf_don_per_imp' + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
207 - html_script = html_script + '<OBJECT WIDTH="1000" HEIGHT="600" data="' + campaign + '_conf_amt50_per_imp' + '.png" type="image/png">\n<p>.</p>\n</OBJECT><br>\n'
208 -
209 - """ !! MODIFY -- THIS currently doesn't look great !!
210 -
211 - f_test_results_1 = open(campaign + '_conf_don_per_imp' + '.txt', 'r')
212 - f_test_results_2 = open(campaign + '_conf_amt50_per_imp' + '.txt', 'r')
213 -
214 - data_results_1 = ''
215 - line = f_test_results_1.readline()
216 - while (line):
217 - data_results_1 = data_results_1 + line + '<br>'
218 - line = f_test_results_1.readline()
219 -
220 - data_results_2 = ''
221 - line = f_test_results_2.readline()
222 - while (line):
223 - data_results_2 = data_results_2 + line + '<br>'
224 - line = f_test_results_2.readline()
225 -
226 - html_script = html_script + data_results_1 + '<br><br>'
227 - html_script = html_script + data_results_2
228266
229 - """
 267+"""
 268+ Inserts a comment into an existing report
230269
231 - html_script = html_script + '</body></html>\n'
232 -
233 - # print html_script
234 -
235 - #f.write(html_script)
236 -
237 - #f.close()
 270+ !! FIXME - do this dynamically with AJAX !!
238271
239 - return [winner_dpi, percent_increase_dpi, confidence_dpi, winner_api, percent_increase_api, confidence_api]
240 -
241 -
242272 """
243 - Inserts a comment into an existing report
244 -
245 -"""
246273 def add_comment(request, utm_campaign):
247274
248275 try:
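In the auto_gen hunk above, the two IntervalReporting constructions differ only in query_type, and a test_type outside the two known constants leaves ir unassigned. A compact alternative sketch, using only names already present in the diff (unknown types fall back to banner-style queries):

# Sketch: pick the query_type once from the test type, then build the reporter.
query_type = 'LP' if test_type == FDH._TESTTYPE_LP_ else 'banner'
ir = DR.IntervalReporting(use_labels=use_labels_var, font_size=20,
                          plot_type='step', query_type=query_type,
                          file_path=projSet.__web_home__ + 'tests/static/images/')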
@@ -255,7 +282,7 @@
256283 row = ttl.get_test_row(utm_campaign)
257284 html_string = ttl.get_test_field(row, 'html_report')
258285
259 - """ Insert comment """
 286+ """ Insert comment into the page html """
260287 new_html = ''
261288 lines = html_string.split('\n')
262289 now = datetime.datetime.utcnow().__str__()
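The add_comment hunk is truncated at this point, so the insertion loop that the split on '\n' sets up is not visible. Purely as a guess at its shape, with comment_text standing in for whatever value the view reads from the comment form (not a name from this revision):

# Hypothetical continuation -- not part of the diff. Append the timestamped
# comment just before the closing </body> tag of the stored report HTML.
for line in lines:
    if '</body>' in line:
        new_html += '<br><i>' + now + '</i> -- ' + comment_text + '<br>\n'
    new_html += line + '\n'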

Status & tagging log