-----------------------------------------------------------------------
-- Oracle Machine Learning for SQL (OML4SQL) 21c
--
-- Classification - Generalized Linear Model Algorithm - dmglcdem.sql
--
-- Copyright (c) 2021 Oracle Corporation and/or its affiliates.
--
-- The Universal Permissive License (UPL), Version 1.0
--
-- https://oss.oracle.com/licenses/upl/
-----------------------------------------------------------------------
SET serveroutput ON
SET trimspool ON
SET pages 10000
SET echo ON
-----------------------------------------------------------------------
-- SAMPLE PROBLEM
-----------------------------------------------------------------------
-- Given demographic and purchase data about a set of customers, predict
-- a customer's response to an affinity card program using a GLM classifier.
--
-----------------------------------------------------------------------
-- SET UP AND ANALYZE THE DATA
-----------------------------------------------------------------------
-------
-- DATA
-------
-- The data for this sample is composed from base tables in SH Schema
-- (See Sample Schema Documentation) and presented through these views:
-- mining_data_build_v (build data)
-- mining_data_test_v (test data)
-- mining_data_apply_v (apply data)
-- (See dmsh.sql for view definitions).
--
-----------
-- ANALYSIS
-----------
-- Data preparation in GLM is performed internally
--
-----------------------------------------------------------------------
-- BUILD THE MODEL
-----------------------------------------------------------------------
-- Cleanup old model with the same name for repeat runs
BEGIN DBMS_DATA_MINING.DROP_MODEL('GLMC_SH_Clas_sample');
EXCEPTION WHEN OTHERS THEN NULL; END;
/
------------------
-- SPECIFY SETTINGS
--
-- Cleanup old settings table for repeat runs
BEGIN EXECUTE IMMEDIATE 'DROP TABLE glmc_sh_sample_settings';
EXCEPTION WHEN OTHERS THEN NULL; END;
/
-- CREATE AND POPULATE A SETTINGS TABLE
--
set echo off
CREATE TABLE glmc_sh_sample_settings (
setting_name VARCHAR2(30),
setting_value VARCHAR2(4000));
set echo on
-- The default classification algorithm is Naive Bayes. So override
-- this choice to GLM logistic regression using a settings table.
-- Turn on feature selection and generation
--
BEGIN
-- Populate settings table
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.algo_name, dbms_data_mining.algo_generalized_linear_model);
-- output row diagnostic statistics
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.glms_row_diagnostics,
dbms_data_mining.glms_row_diag_enable);
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.prep_auto, dbms_data_mining.prep_auto_on);
-- turn on feature selection
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.glms_ftr_selection,
dbms_data_mining.glms_ftr_selection_enable);
-- turn on feature generation
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.glms_ftr_generation,
dbms_data_mining.glms_ftr_generation_enable);
/* Examples of possible overrides are shown below. If the user does not
override, then relevant settings are determined by the algorithm
-- specify a row weight column
(dbms_data_mining.odms_row_weight_column_name,<row_weight_column_name>);
-- specify a missing value treatment method:
Default: replace with mean (numeric features) or
mode (categorical features)
or delete the row
(dbms_data_mining.odms_missing_value_treatment,
dbms_data_mining.odms_missing_value_delete_row);
-- turn ridge regression on or off
By default the system turns it on if there is a multicollinearity
(dbms_data_mining.glms_ridge_regression,
dbms_data_mining.glms_ridge_reg_enable);
*/
END;
/
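-- As an optional sanity check (not part of the original demo), list the
-- settings just populated before building the model:
column setting_name format a30
column setting_value format a30
SELECT setting_name, setting_value
  FROM glmc_sh_sample_settings
 ORDER BY setting_name;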
---------------------
-- CREATE A NEW MODEL
--
-- Force the column age to be a feature in the model using
-- dbms_data_mining_transform
--
declare
v_xlst dbms_data_mining_transform.TRANSFORM_LIST;
BEGIN
-- Force the column age to be a feature in the model
dbms_data_mining_transform.set_transform(v_xlst,
'AGE', NULL, 'AGE', 'AGE', 'FORCE_IN');
DBMS_DATA_MINING.CREATE_MODEL(
    model_name          => 'GLMC_SH_Clas_sample',
    mining_function     => dbms_data_mining.classification,
    data_table_name     => 'mining_data_build_v',
    case_id_column_name => 'cust_id',
    target_column_name  => 'affinity_card',
    settings_table_name => 'glmc_sh_sample_settings',
    xform_list          => v_xlst);
END;
/
-------------------------
-- DISPLAY MODEL SETTINGS
--
column setting_name format a30
column setting_value format a30
SELECT setting_name, setting_value
FROM user_mining_model_settings
WHERE model_name ='GLMC_SH_CLAS_SAMPLE'
ORDER BY setting_name;
--------------------------
-- DISPLAY MODEL SIGNATURE
--
column attribute_name format a40
column attribute_type format a20
SELECT attribute_name, attribute_type
FROM user_mining_model_attributes
WHERE model_name ='GLMC_SH_CLAS_SAMPLE'
ORDER BY attribute_name;
------------------------
-- DISPLAY MODEL DETAILS
-- IF the covariance matrix had been invalid, THEN the global details
-- would have had a row like:
--
-- VALID_COVARIANCE_MATRIX 0
--
-- And, as a result, we would only have gotten a limited set of diagnostics.
-- This never happens with feature selection enabled. If the forced-in feature
-- had caused a multi-collinearity, then the model build would have failed.
-- Note that the forced-in feature, age, was not statistically significant.
-- However, it did not cause a multi-collinearity, hence the build succeeded.
--
-- With feature selection disabled, an invalid covariance matrix is
-- possible. In that case, multi-collinearity, if it exists, will cause
-- RIDGE REGRESSION to kick in, unless you have specifically disabled it.
-- The build will succeed with ridge enabled. However, the covariance
-- matrix will be invalid since it is not computed by the ridge algorithm.
--
-- An important consequence of an invalid covariance matrix is that
-- the model cannot predict confidence bounds - i.e. the result of
-- PREDICTION_BOUNDS function in a SQL query is NULL.
--
-- If accuracy is the primary goal and interpretability is not important, note
-- that RIDGE REGRESSION may be preferable to feature selection for
-- some datasets. You can test this by specifically enabling ridge regression.
--
-- In this demo, we compute two models. The first (above) has feature
-- selection enabled and produces confidence bounds and a full set of
-- diagnostics. The second enables ridge regression. It does not produce
-- confidence bounds. It only produces a limited set of diagnostics.
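-- Optional check (not part of the original demo): look for the
-- VALID_COVARIANCE_MATRIX row discussed above in the global diagnostics view.
-- If the matrix were invalid, this query would return that row with value 0;
-- for this model (feature selection enabled) it should not.
SELECT name, numeric_value
  FROM DM$VGGLMC_SH_CLAS_SAMPLE
 WHERE name = 'VALID_COVARIANCE_MATRIX';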
-- Get a list of model views
col view_name format a30
col view_type format a50
SELECT view_name, view_type FROM user_mining_model_views
WHERE model_name='GLMC_SH_CLAS_SAMPLE'
ORDER BY view_name;
-- Global statistics
column name format a30
column numeric_value format 9999990.999
column string_value format a20
select name, numeric_value, string_value from DM$VGGLMC_SH_CLAS_SAMPLE
ORDER BY name;
-- Coefficient statistics
SET lines 200
column feature_expression format a53
column attr_name format a20
col attr_val format a10
column coefficient format 9999990.999
column std_error format 9999990.999
column test_statistic format 9999990.999
column p_value format 9999990.999
column std_coefficient format 9999990.999
column lower_coeff_limit format 9999990.999
column upper_coeff_limit format 9999990.999
column exp_coefficient format 9999990.999
column exp_lower_coeff_limit format 9999990.999
column exp_upper_coeff_limit format 9999990.999
SELECT attribute_name attr_name, attribute_value attr_val,
coefficient, std_error, test_statistic,
p_value, std_coefficient, lower_coeff_limit, upper_coeff_limit,
exp_coefficient, exp_lower_coeff_limit, exp_upper_coeff_limit
FROM DM$VDGLMC_SH_CLAS_SAMPLE
ORDER BY 1, 2;
-- Show the features and their p_values
SET lin 80
SET pages 20
SELECT attribute_name attr_name, attribute_value attr_val, coefficient, p_value
FROM DM$VDGLMC_SH_CLAS_SAMPLE
ORDER BY p_value;
-- Row diagnostics
SELECT CASE_id, TARGET_value, TARGET_value_prob, hat,
working_residual, pearson_residual, deviance_residual,
c, cbar, difdev, difchisq
FROM dm$vaGLMC_SH_Clas_sample
WHERE case_id <=101510
ORDER BY case_id;
-----------------------------------------------------------------------
-- TEST THE MODEL
-----------------------------------------------------------------------
------------------------------------
-- COMPUTE METRICS TO TEST THE MODEL
--
-- The queries shown below demonstrate the use of new SQL data mining functions
-- along with analytic functions to compute the various test metrics.
--
-- Model name: glmc_sh_clas_sample
-- Target attribute: affinity_card
-- Positive target value: 1
-- (Change as appropriate for a different example)
-- Compute CONFUSION MATRIX
--
-- This query demonstrates how to generate a confusion matrix using the new
-- SQL prediction functions for scoring. The returned columns match the
-- schema of the table generated by COMPUTE_CONFUSION_MATRIX procedure.
--
SELECT affinity_card AS actual_target_value,
PREDICTION(glmc_sh_clas_sample USING *) AS predicted_target_value,
COUNT(*) AS value
FROM mining_data_test_v
GROUP BY affinity_card, PREDICTION(glmc_sh_clas_sample USING *)
ORDER BY 1, 2;
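-- For comparison, the same matrix can also be produced with the
-- COMPUTE_CONFUSION_MATRIX procedure mentioned above. This optional sketch is
-- not part of the original demo; the apply-result and confusion-matrix table
-- names below are hypothetical.
BEGIN EXECUTE IMMEDIATE 'DROP TABLE glmc_apply_result';
EXCEPTION WHEN OTHERS THEN NULL; END;
/
BEGIN EXECUTE IMMEDIATE 'DROP TABLE glmc_confusion_matrix';
EXCEPTION WHEN OTHERS THEN NULL; END;
/
DECLARE
  v_accuracy NUMBER;
BEGIN
  -- Apply the model to the test data, then compare predictions with the
  -- actual target values.
  DBMS_DATA_MINING.APPLY(
    model_name          => 'GLMC_SH_Clas_sample',
    data_table_name     => 'mining_data_test_v',
    case_id_column_name => 'cust_id',
    result_table_name   => 'glmc_apply_result');
  DBMS_DATA_MINING.COMPUTE_CONFUSION_MATRIX(
    accuracy                    => v_accuracy,
    apply_result_table_name     => 'glmc_apply_result',
    target_table_name           => 'mining_data_test_v',
    case_id_column_name         => 'cust_id',
    target_column_name          => 'affinity_card',
    confusion_matrix_table_name => 'glmc_confusion_matrix',
    score_column_name           => 'PREDICTION',
    score_criterion_column_name => 'PROBABILITY');
  DBMS_OUTPUT.PUT_LINE('Accuracy from COMPUTE_CONFUSION_MATRIX: ' ||
                       ROUND(v_accuracy, 4));
END;
/
SELECT actual_target_value, predicted_target_value, value
  FROM glmc_confusion_matrix
 ORDER BY 1, 2;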
-- Compute ACCURACY
--
column accuracy format 9.99
SELECT SUM(correct)/COUNT(*) AS accuracy
FROM (SELECT DECODE(affinity_card,
PREDICTION(glmc_sh_clas_sample USING *), 1, 0) AS correct
FROM mining_data_test_v);
-- Compute AUC (Area Under the roc Curve)
-- (See notes on ROC Curve and AUC computation in dmsvcdem.sql)
--
column auc format 9.99
WITH
pos_prob_and_counts AS (
SELECT PREDICTION_PROBABILITY(glmc_sh_clas_sample, 1 USING *) pos_prob,
DECODE(affinity_card, 1, 1, 0) pos_cnt
FROM mining_data_test_v
),
tpf_fpf AS (
SELECT pos_cnt,
SUM(pos_cnt) OVER (ORDER BY pos_prob DESC) /
SUM(pos_cnt) OVER () tpf,
SUM(1- pos_cnt) OVER (ORDER BY pos_prob DESC) /
SUM(1- pos_cnt) OVER () fpf
FROM pos_prob_and_counts
),
trapezoid_areas AS (
SELECT 0.5 * (fpf - LAG(fpf, 1, 0) OVER (ORDER BY fpf, tpf)) *
       (tpf + LAG(tpf, 1, 0) OVER (ORDER BY fpf, tpf)) area
FROM tpf_fpf
WHERE pos_cnt = 1
OR (tpf = 1 AND fpf = 1)
)
SELECT SUM(area) auc
FROM trapezoid_areas;
--------------------------------------------------------------------------
-- SCORE DATA
--------------------------------------------------------------------------
-- Since the model has a valid covariance matrix, it is possible
-- to obtain confidence bounds. In addition, provide the ranked set
-- of attributes which have the most influence on each prediction.
set long 10000
SELECT PREDICTION(GLMC_SH_Clas_sample USING *) pr,
PREDICTION_PROBABILITY(GLMC_SH_Clas_sample USING *) pb,
PREDICTION_BOUNDS(GLMC_SH_Clas_sample USING *).lower pl,
PREDICTION_BOUNDS(GLMC_SH_Clas_sample USING *).upper pu,
PREDICTION_DETAILS(GLMC_SH_Clas_sample USING *) pd
FROM mining_data_apply_v
WHERE CUST_ID <=100010
ORDER BY CUST_ID;
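-- The PREDICTION_DETAILS output above is XML. As an optional sketch (not part
-- of the original demo), the ranked attributes can be unpacked into relational
-- columns with XMLTABLE, assuming the documented <Details>/<Attribute> format:
column aname format a25
column aval format a15
SELECT cust_id, aname, aval, weight, rnk
FROM (SELECT cust_id,
             PREDICTION_DETAILS(GLMC_SH_Clas_sample USING *) pd
        FROM mining_data_apply_v
       WHERE cust_id <= 100010),
     XMLTABLE('/Details/Attribute' PASSING pd
              COLUMNS aname  VARCHAR2(30)  PATH '@name',
                      aval   VARCHAR2(128) PATH '@actualValue',
                      weight NUMBER        PATH '@weight',
                      rnk    NUMBER        PATH '@rank')
ORDER BY cust_id, rnk;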
-- Next we compare to a model built with ridge regression enabled
-----------------------------------------------------------------------
-- BUILD A NEW MODEL
-----------------------------------------------------------------------
---------------------
-- CREATE A NEW MODEL
--
-- Cleanup old model with same name (if any)
BEGIN
DBMS_DATA_MINING.DROP_MODEL('GLMC_SH_Clas_sample');
EXCEPTION WHEN OTHERS THEN
NULL;
END;
/
-- SPECIFY SETTINGS
--
-- Cleanup old settings table
BEGIN EXECUTE IMMEDIATE 'DROP TABLE glmc_sh_sample_settings';
EXCEPTION WHEN OTHERS THEN NULL; END;
/
CREATE TABLE glmc_sh_sample_settings (
setting_name VARCHAR2(30),
setting_value VARCHAR2(4000));
-- Turn on ridge regression
--
BEGIN
-- Populate settings table
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.algo_name, dbms_data_mining.algo_generalized_linear_model);
-- output row diagnostic statistics into a table named GLMC_SH_SAMPLE_DIAG
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.glms_row_diagnostics,
dbms_data_mining.glms_row_diag_enable);
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.prep_auto, dbms_data_mining.prep_auto_on);
-- turn on ridge regression
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.glms_ridge_regression,
dbms_data_mining.glms_ridge_reg_enable);
INSERT INTO glmc_sh_sample_settings (setting_name, setting_value) VALUES
(dbms_data_mining.GLMS_SOLVER,
dbms_data_mining.GLMS_SOLVER_QR);
END;
/
commit;
BEGIN
DBMS_DATA_MINING.CREATE_MODEL(
    model_name          => 'GLMC_SH_Clas_sample',
    mining_function     => dbms_data_mining.classification,
    data_table_name     => 'mining_data_build_v',
    case_id_column_name => 'cust_id',
    target_column_name  => 'affinity_card',
    settings_table_name => 'glmc_sh_sample_settings');
END;
/
-------------------------
-- DISPLAY MODEL SETTINGS
--
column setting_name format a30
column setting_value format a30
SELECT setting_name, setting_value
FROM user_mining_model_settings
WHERE model_name ='GLMC_SH_CLAS_SAMPLE'
ORDER BY setting_name;
--------------------------
-- DISPLAY MODEL SIGNATURE
--
column attribute_name format a40
column attribute_type format a20
SELECT attribute_name, attribute_type
FROM user_mining_model_attributes
WHERE model_name ='GLMC_SH_CLAS_SAMPLE'
ORDER BY attribute_name;
------------------------
-- DISPLAY MODEL DETAILS
--
-- Get a list of model views
col view_name format a30
col view_type format a50
SELECT view_name, view_type FROM user_mining_model_views
WHERE model_name='GLMC_SH_CLAS_SAMPLE'
ORDER BY view_name;
-- Global statistics
column numeric_value format 9999990.999
column string_value format a20
select name, numeric_value, string_value from DM$VGGLMC_SH_CLAS_SAMPLE
ORDER BY name;
-- Coefficient statistics
SET lines 120
column class format a20
column attribute_name format a20
column attribute_subname format a20
column attribute_value format a20
column partition_name format a20
SELECT *
FROM DM$VDGLMC_SH_CLAS_SAMPLE
WHERE attribute_name ='OCCUPATION'
ORDER BY target_value, attribute_name, attribute_value;
-- Limited row diagnostics - working_residuals only, others are NULL
SELECT CASE_id, TARGET_value, TARGET_value_prob, working_residual
FROM dm$vaGLMC_SH_Clas_sample
WHERE case_id <=101510
ORDER BY case_id;
-----------------------------------------------------------------------
-- TEST THE NEW MODEL
-----------------------------------------------------------------------
------------------------------------
-- COMPUTE METRICS TO TEST THE MODEL
--
-- The queries shown below demonstrate the use of new SQL data mining functions
-- along with analytic functions to compute various test metrics.
--
-- Model name: glmc_sh_clas_sample
-- Target attribute: affinity_card
-- Positive target value: 1
-- (Change these as appropriate for a different example)
-- Compute CONFUSION MATRIX
--
-- This query demonstrates how to generate a confusion matrix using the new
-- SQL prediction functions for scoring. The returned columns match the
-- schema of the table generated by COMPUTE_CONFUSION_MATRIX procedure.
--
SELECT affinity_card AS actual_target_value,
PREDICTION(glmc_sh_clas_sample USING *) AS predicted_target_value,
COUNT(*) AS value
FROM mining_data_test_v
GROUP BY affinity_card, PREDICTION(glmc_sh_clas_sample USING *)
ORDER BY 1, 2;
-- Compute ACCURACY
--
column accuracy format 9.99
SELECT SUM(correct)/COUNT(*) AS accuracy
FROM (SELECT DECODE(affinity_card,
PREDICTION(glmc_sh_clas_sample USING *), 1, 0) AS correct
FROM mining_data_test_v);
-- Compute AUC (Area Under the roc Curve)
--
-- See notes on ROC Curve and AUC computation above
--
column auc format 9.99
WITH
pos_prob_and_counts AS (
SELECT PREDICTION_PROBABILITY(glmc_sh_clas_sample, 1 USING *) pos_prob,
DECODE(affinity_card, 1, 1, 0) pos_cnt
FROM mining_data_test_v
),
tpf_fpf AS (
SELECT pos_cnt,
SUM(pos_cnt) OVER (ORDER BY pos_prob DESC) /
SUM(pos_cnt) OVER () tpf,
SUM(1- pos_cnt) OVER (ORDER BY pos_prob DESC) /
SUM(1- pos_cnt) OVER () fpf
FROM pos_prob_and_counts
),
trapezoid_areas AS (
SELECT 0.5 * (fpf - LAG(fpf, 1, 0) OVER (ORDER BY fpf, tpf)) *
       (tpf + LAG(tpf, 1, 0) OVER (ORDER BY fpf, tpf)) area
FROM tpf_fpf
WHERE pos_cnt = 1
OR (tpf = 1 AND fpf = 1)
)
SELECT SUM(area) auc
FROM trapezoid_areas;
-- Judging from the accuracy and AUC, the ridge regression model
-- and feature selection/generation model are of approximately equal
-- quality
--------------------------------------------------------------------------
-- SCORE DATA
--------------------------------------------------------------------------
-- Since this model has an invalid covariance matrix, it is no longer
-- possible to obtain confidence bounds, so the lower (PL) and upper (PU)
-- confidence bounds returned below are NULL.
set long 10000
SELECT PREDICTION(GLMC_SH_Clas_sample USING *) pr,
PREDICTION_PROBABILITY(GLMC_SH_Clas_sample USING *) pb,
PREDICTION_BOUNDS(GLMC_SH_Clas_sample USING *).lower pl,
PREDICTION_BOUNDS(GLMC_SH_Clas_sample USING *).upper pu,
PREDICTION_DETAILS(GLMC_SH_Clas_sample USING *) pd
FROM mining_data_apply_v
WHERE CUST_ID <=100010
ORDER BY CUST_ID;
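-- Optional check (not part of the original demo): with ridge regression in
-- effect, the bounds should be NULL for every scored row.
SELECT COUNT(*) scored_rows,
       SUM(CASE WHEN PREDICTION_BOUNDS(GLMC_SH_Clas_sample USING *).lower IS NULL
                THEN 1 ELSE 0 END) null_lower_bounds
  FROM mining_data_apply_v;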