
Commit 8fb8bb4

Format using ruff
1 parent 4e93532 commit 8fb8bb4
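
A formatting-only commit like this one is typically produced by running ruff's Black-compatible formatter across the repository. The exact invocation is not recorded in the commit, so the following is a sketch under default settings, not the author's command:

    pip install ruff
    ruff format .    # rewrites files in place; layout changes only, no behavior change

Because `ruff format` touches only layout (line breaks, trailing commas, whitespace), every hunk below is semantically neutral.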

13 files changed: +157 -35 lines changed

examples/gbm.py

Lines changed: 27 additions & 6 deletions

@@ -18,11 +18,21 @@
 def classification():
     # Generate a random binary classification problem.
     X, y = make_classification(
-        n_samples=350, n_features=15, n_informative=10, random_state=1111, n_classes=2, class_sep=1.0, n_redundant=0
+        n_samples=350,
+        n_features=15,
+        n_informative=10,
+        random_state=1111,
+        n_classes=2,
+        class_sep=1.0,
+        n_redundant=0,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.15, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=1111)

-    model = GradientBoostingClassifier(n_estimators=50, max_depth=4, max_features=8, learning_rate=0.1)
+    model = GradientBoostingClassifier(
+        n_estimators=50, max_depth=4, max_features=8, learning_rate=0.1
+    )
     model.fit(X_train, y_train)
     predictions = model.predict(X_test)
     print(predictions)
@@ -34,14 +44,25 @@ def classification():
 def regression():
     # Generate a random regression problem
     X, y = make_regression(
-        n_samples=500, n_features=5, n_informative=5, n_targets=1, noise=0.05, random_state=1111, bias=0.5
+        n_samples=500,
+        n_features=5,
+        n_informative=5,
+        n_targets=1,
+        noise=0.05,
+        random_state=1111,
+        bias=0.5,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.1, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)

     model = GradientBoostingRegressor(n_estimators=25, max_depth=5, max_features=3)
     model.fit(X_train, y_train)
     predictions = model.predict(X_test)
-    print("regression, mse: %s" % mean_squared_error(y_test.flatten(), predictions.flatten()))
+    print(
+        "regression, mse: %s"
+        % mean_squared_error(y_test.flatten(), predictions.flatten())
+    )


 if __name__ == "__main__":
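
The pattern in this first hunk repeats across the whole commit: ruff, like Black, keeps a call on one line when it fits the default 88-character limit; otherwise it breaks at the outer parentheses, and if the arguments still do not fit on a single indented line it gives each argument its own line with a trailing comma (the GradientBoostingClassifier call above shows the intermediate form, where the arguments fit on one indented line). A minimal before/after sketch, assuming default settings:

    from sklearn.datasets import make_classification

    # Before formatting: all arguments on one line, past the 88-character limit.
    X, y = make_classification(
        n_samples=350, n_features=15, n_informative=10, random_state=1111, n_classes=2, class_sep=1.0, n_redundant=0
    )

    # After formatting: one argument per line, plus a "magic" trailing comma
    # that keeps the call in this exploded form on future runs.
    X, y = make_classification(
        n_samples=350,
        n_features=15,
        n_informative=10,
        random_state=1111,
        n_classes=2,
        class_sep=1.0,
        n_redundant=0,
    )

The same rule produces the parenthesized multi-line import list in examples/nnet_convnet_mnist.py below.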

examples/kmeans.py

Lines changed: 3 additions & 1 deletion

@@ -5,7 +5,9 @@


 def kmeans_example(plot=False):
-    X, y = make_blobs(centers=4, n_samples=500, n_features=2, shuffle=True, random_state=42)
+    X, y = make_blobs(
+        centers=4, n_samples=500, n_features=2, shuffle=True, random_state=42
+    )
     clusters = len(np.unique(y))
     k = KMeans(K=clusters, max_iters=150, init="++")
     k.fit(X)

examples/linear_models.py

Lines changed: 19 additions & 4 deletions

@@ -17,9 +17,17 @@
 def regression():
     # Generate a random regression problem
     X, y = make_regression(
-        n_samples=10000, n_features=100, n_informative=75, n_targets=1, noise=0.05, random_state=1111, bias=0.5
+        n_samples=10000,
+        n_features=100,
+        n_informative=75,
+        n_targets=1,
+        noise=0.05,
+        random_state=1111,
+        bias=0.5,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.25, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1111)

     model = LinearRegression(lr=0.01, max_iters=2000, penalty="l2", C=0.03)
     model.fit(X_train, y_train)
@@ -30,9 +38,16 @@ def regression():
 def classification():
     # Generate a random binary classification problem.
     X, y = make_classification(
-        n_samples=1000, n_features=100, n_informative=75, random_state=1111, n_classes=2, class_sep=2.5
+        n_samples=1000,
+        n_features=100,
+        n_informative=75,
+        random_state=1111,
+        n_classes=2,
+        class_sep=2.5,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.1, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)

     model = LogisticRegression(lr=0.01, max_iters=500, penalty="l1", C=0.01)
     model.fit(X_train, y_train)

examples/naive_bayes.py

Lines changed: 10 additions & 2 deletions

@@ -8,9 +8,17 @@
 def classification():
     # Generate a random binary classification problem.
     X, y = make_classification(
-        n_samples=1000, n_features=10, n_informative=10, random_state=1111, n_classes=2, class_sep=2.5, n_redundant=0
+        n_samples=1000,
+        n_features=10,
+        n_informative=10,
+        random_state=1111,
+        n_classes=2,
+        class_sep=2.5,
+        n_redundant=0,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.1, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)

     model = NaiveBayesClassifier()
     model.fit(X_train, y_train)

examples/nearest_neighbors.py

Lines changed: 13 additions & 3 deletions

@@ -13,9 +13,17 @@
 def regression():
     # Generate a random regression problem
     X, y = make_regression(
-        n_samples=500, n_features=5, n_informative=5, n_targets=1, noise=0.05, random_state=1111, bias=0.5
+        n_samples=500,
+        n_features=5,
+        n_informative=5,
+        n_targets=1,
+        noise=0.05,
+        random_state=1111,
+        bias=0.5,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.25, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1111)

     model = knn.KNNRegressor(k=5, distance_func=distance.euclidean)
     model.fit(X_train, y_train)
@@ -35,7 +43,9 @@ def classification():
         class_sep=1.5,
     )

-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.1, random_state=1111
+    )

     clf = knn.KNNClassifier(k=5, distance_func=distance.euclidean)

examples/nnet_convnet_mnist.py

Lines changed: 8 additions & 1 deletion

@@ -3,7 +3,14 @@
 from mla.datasets import load_mnist
 from mla.metrics import accuracy
 from mla.neuralnet import NeuralNet
-from mla.neuralnet.layers import Activation, Convolution, MaxPooling, Flatten, Dropout, Parameters
+from mla.neuralnet.layers import (
+    Activation,
+    Convolution,
+    MaxPooling,
+    Flatten,
+    Dropout,
+    Parameters,
+)
 from mla.neuralnet.layers import Dense
 from mla.neuralnet.optimizers import Adadelta
 from mla.utils import one_hot

examples/nnet_mlp.py

Lines changed: 20 additions & 4 deletions

@@ -23,10 +23,17 @@
 def classification():
     # Generate a random binary classification problem.
     X, y = make_classification(
-        n_samples=1000, n_features=100, n_informative=75, random_state=1111, n_classes=2, class_sep=2.5
+        n_samples=1000,
+        n_features=100,
+        n_informative=75,
+        random_state=1111,
+        n_classes=2,
+        class_sep=2.5,
     )
     y = one_hot(y)
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=1111)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.15, random_state=1111
+    )

     model = NeuralNet(
         layers=[
@@ -51,9 +58,18 @@ def classification():

 def regression():
     # Generate a random regression problem
-    X, y = make_regression(n_samples=5000, n_features=25, n_informative=25, n_targets=1, random_state=100, noise=0.05)
+    X, y = make_regression(
+        n_samples=5000,
+        n_features=25,
+        n_informative=25,
+        n_targets=1,
+        random_state=100,
+        noise=0.05,
+    )
     y *= 0.01
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.1, random_state=1111
+    )

     model = NeuralNet(
         layers=[

examples/nnet_rnn_binary_add.py

Lines changed: 3 additions & 1 deletion

@@ -38,7 +38,9 @@ def addition_dataset(dim=10, n_samples=10000, batch_size=64):
         # Generate target variable (a+b)
         y[i, :, 0] = list(reversed([int(x) for x in binary_format.format(a + b)]))

-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1111)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.2, random_state=1111
+    )

     # Round number of examples for batch processing
     train_b = (X_train.shape[0] // batch_size) * batch_size

examples/pca.py

Lines changed: 9 additions & 2 deletions

@@ -12,11 +12,18 @@

 # Generate a random binary classification problem.
 X, y = make_classification(
-    n_samples=1000, n_features=100, n_informative=75, random_state=1111, n_classes=2, class_sep=2.5
+    n_samples=1000,
+    n_features=100,
+    n_informative=75,
+    random_state=1111,
+    n_classes=2,
+    class_sep=2.5,
 )


-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1111)
+X_train, X_test, y_train, y_test = train_test_split(
+    X, y, test_size=0.25, random_state=1111
+)

 for s in ["svd", "eigen"]:
     p = PCA(15, solver=s)

examples/random_forest.py

Lines changed: 25 additions & 6 deletions

@@ -19,32 +19,51 @@
 def classification():
     # Generate a random binary classification problem.
     X, y = make_classification(
-        n_samples=500, n_features=10, n_informative=10, random_state=1111, n_classes=2, class_sep=2.5, n_redundant=0
+        n_samples=500,
+        n_features=10,
+        n_informative=10,
+        random_state=1111,
+        n_classes=2,
+        class_sep=2.5,
+        n_redundant=0,
     )

-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=1111)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.15, random_state=1111
+    )

     model = RandomForestClassifier(n_estimators=10, max_depth=4)
     model.fit(X_train, y_train)

     predictions_prob = model.predict(X_test)[:, 1]
     predictions = np.argmax(model.predict(X_test), axis=1)
-    #print(predictions.shape)
+    # print(predictions.shape)
     print("classification, roc auc score: %s" % roc_auc_score(y_test, predictions_prob))
     print("classification, accuracy score: %s" % accuracy_score(y_test, predictions))


 def regression():
     # Generate a random regression problem
     X, y = make_regression(
-        n_samples=500, n_features=5, n_informative=5, n_targets=1, noise=0.05, random_state=1111, bias=0.5
+        n_samples=500,
+        n_features=5,
+        n_informative=5,
+        n_targets=1,
+        noise=0.05,
+        random_state=1111,
+        bias=0.5,
+    )
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.1, random_state=1111
     )
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1111)

     model = RandomForestRegressor(n_estimators=50, max_depth=10, max_features=3)
     model.fit(X_train, y_train)
     predictions = model.predict(X_test)
-    print("regression, mse: %s" % mean_squared_error(y_test.flatten(), predictions.flatten()))
+    print(
+        "regression, mse: %s"
+        % mean_squared_error(y_test.flatten(), predictions.flatten())
+    )


 if __name__ == "__main__":
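
After a sweep like this, formatting drift is usually prevented by running the formatter in check mode in CI; this is a suggested follow-up, not part of the commit:

    ruff format --check .    # exits non-zero if any file would be reformatted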
