forked from TheAlgorithms/Python
- Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapriori_algorithm.py
113 lines (87 loc) · 3.82 KB
/
apriori_algorithm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
"""
Apriori Algorithm is a Association rule mining technique, also known as market basket
analysis, aims to discover interesting relationships or associations among a set of
items in a transactional or relational database.
For example, Apriori Algorithm states: "If a customer buys item A and item B, then they
are likely to buy item C." This rule suggests a relationship between items A, B, and C,
indicating that customers who purchased A and B are more likely to also purchase item C.
WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm
Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining
"""
from itertools import combinations
def load_data() -> list[list[str]]:
    """Provide a small, fixed set of sample transactions.

    Each inner list is one transaction (the items bought together).

    >>> load_data()
    [['milk'], ['milk', 'butter'], ['milk', 'bread'], ['milk', 'bread', 'chips']]
    """
    transactions = [
        ["milk"],
        ["milk", "butter"],
        ["milk", "bread"],
        ["milk", "bread", "chips"],
    ]
    return transactions
def prune(itemset: list, candidates: list, length: int) -> list:
    """Filter out candidate itemsets that cannot be frequent.

    A candidate survives only if every one of its items occurs in ``itemset``
    at least ``length - 1`` times, i.e. all of its (k-1)-subsets were frequent
    in the previous iteration (the Apriori pruning step).

    >>> itemset = ['X', 'Y', 'Z']
    >>> candidates = [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]
    >>> prune(itemset, candidates, 2)
    [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]
    >>> itemset = ['1', '2', '3', '4']
    >>> candidates = ['1', '2', '4']
    >>> prune(itemset, candidates, 3)
    []
    """
    # Keep a candidate only when every member passes the support check.
    return [
        candidate
        for candidate in candidates
        if all(
            member in itemset and itemset.count(member) >= length - 1
            for member in candidate
        )
    ]
def apriori(data: list[list[str]], min_support: int) -> list[tuple[list[str], int]]:
    """Return the frequent itemsets of ``data`` with their support counts.

    Args:
        data: A list of transactions, where each transaction is a list of items.
        min_support: Minimum number of transactions an itemset must appear in
            to be considered frequent.

    Returns:
        A list of ``(sorted_itemset, support_count)`` tuples, in discovery order.

    >>> data = [['A', 'B', 'C'], ['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C']]
    >>> apriori(data, 2)
    [(['A', 'B'], 2), (['A', 'C'], 2), (['B', 'C'], 2)]
    >>> data = [['1', '2', '3'], ['1', '2'], ['1', '3'], ['1', '4'], ['2', '3']]
    >>> apriori(data, 3)
    []
    """
    itemset = [list(transaction) for transaction in data]
    frequent_itemsets = []
    length = 1

    while itemset:
        # Count how many transactions contain each candidate itemset.
        counts = [0] * len(itemset)
        for transaction in data:
            for j, candidate in enumerate(itemset):
                if all(item in transaction for item in candidate):
                    counts[j] += 1

        # Pair each candidate with its OWN count before filtering.
        # BUG FIX: the previous version filtered `itemset` first and then
        # indexed the unfiltered `counts` with post-filter positions, which
        # reported wrong support values for the surviving itemsets.
        frequent = [
            (item, count)
            for item, count in zip(itemset, counts)
            if count >= min_support
        ]

        # Record frequent itemsets (sorted, to give a deterministic order).
        for item, count in frequent:
            frequent_itemsets.append((sorted(item), count))

        itemset = [item for item, _ in frequent]
        length += 1
        itemset = prune(itemset, list(combinations(itemset, length)), length)

    return frequent_itemsets
if__name__=="__main__":
"""
Apriori algorithm for finding frequent itemsets.
Args:
data: A list of transactions, where each transaction is a list of items.
min_support: The minimum support threshold for frequent itemsets.
Returns:
A list of frequent itemsets along with their support counts.
"""
importdoctest
doctest.testmod()
# user-defined threshold or minimum support level
frequent_itemsets=apriori(data=load_data(), min_support=2)
print("\n".join(f"{itemset}: {support}"foritemset, supportinfrequent_itemsets))