Commit e02eaff

fix for np.float32 serialization (#589)
1 parent db9523c commit e02eaff

File tree

pgml-extension/examples/finetune.sql
pgml-extension/examples/transformers.sql
pgml-extension/src/bindings/transformers.py
pgml-extension/tests/test.sql

4 files changed: 150 additions, 82 deletions


pgml-extension/examples/finetune.sql

Lines changed: 90 additions & 0 deletions
@@ -0,0 +1,90 @@
+-- Exit on error (psql)
+\set ON_ERROR_STOP true
+\timing on
+
+
+SELECT pgml.load_dataset('kde4', kwargs => '{"lang1": "en", "lang2": "es"}');
+CREATE OR REPLACE VIEW kde4_en_to_es AS
+SELECT translation->>'en' AS "en", translation->>'es' AS "es"
+FROM pgml.kde4
+LIMIT 10;
+SELECT pgml.tune(
+    'Translate English to Spanish',
+    task => 'translation',
+    relation_name => 'kde4_en_to_es',
+    y_column_name => 'es', -- translate into spanish
+    model_name => 'Helsinki-NLP/opus-mt-en-es',
+    hyperparams => '{
+        "learning_rate": 2e-5,
+        "per_device_train_batch_size": 16,
+        "per_device_eval_batch_size": 16,
+        "num_train_epochs": 1,
+        "weight_decay": 0.01,
+        "max_length": 128
+    }',
+    test_size => 0.5,
+    test_sampling => 'last'
+);
+
+SELECT pgml.load_dataset('imdb');
+SELECT pgml.tune(
+    'IMDB Review Sentiment',
+    task => 'text-classification',
+    relation_name => 'pgml.imdb',
+    y_column_name => 'label',
+    model_name => 'distilbert-base-uncased',
+    hyperparams => '{
+        "learning_rate": 2e-5,
+        "per_device_train_batch_size": 16,
+        "per_device_eval_batch_size": 16,
+        "num_train_epochs": 1,
+        "weight_decay": 0.01
+    }',
+    test_size => 0.5,
+    test_sampling => 'last'
+);
+SELECT pgml.predict('IMDB Review Sentiment', 'I love SQL');
+
+SELECT pgml.load_dataset('squad_v2');
+SELECT pgml.tune(
+    'SQuAD Q&A v2',
+    'question-answering',
+    'pgml.squad_v2',
+    'answers',
+    'deepset/roberta-base-squad2',
+    hyperparams => '{
+        "evaluation_strategy": "epoch",
+        "learning_rate": 2e-5,
+        "per_device_train_batch_size": 16,
+        "per_device_eval_batch_size": 16,
+        "num_train_epochs": 1,
+        "weight_decay": 0.01,
+        "max_length": 384,
+        "stride": 128
+    }',
+    test_size => 11873,
+    test_sampling => 'last'
+);
+
+
+SELECT pgml.load_dataset('billsum', kwargs => '{"split": "ca_test"}');
+CREATE OR REPLACE VIEW billsum_training_data
+AS SELECT title || '\n' || text AS text, summary FROM pgml.billsum;
+SELECT pgml.tune(
+    'Legal Summarization',
+    task => 'summarization',
+    relation_name => 'billsum_training_data',
+    y_column_name => 'summary',
+    model_name => 'sshleifer/distilbart-xsum-12-1',
+    hyperparams => '{
+        "learning_rate": 2e-5,
+        "per_device_train_batch_size": 2,
+        "per_device_eval_batch_size": 2,
+        "num_train_epochs": 1,
+        "weight_decay": 0.01,
+        "max_input_length": 1024,
+        "max_summary_length": 128
+    }',
+    test_size => 0.01,
+    test_sampling => 'last'
+);

pgml-extension/examples/transformers.sql

Lines changed: 50 additions & 79 deletions
@@ -32,89 +32,60 @@ SELECT pgml.transform(
 'Dominic Cobb is the foremost practitioner of the artistic science of extraction, inserting oneself into a subject''s dreams to obtain hidden information without the subject knowing, a concept taught to him by his professor father-in-law, Dr. Stephen Miles. Dom''s associates are Miles'' former students, who Dom requires as he has given up being the dream architect for reasons he won''t disclose. Dom''s primary associate, Arthur, believes it has something to do with Dom''s deceased wife, Mal, who often figures prominently and violently in those dreams, or Dom''s want to "go home" (get back to his own reality, which includes two young children). Dom''s work is generally in corporate espionage. As the subjects don''t want the information to get into the wrong hands, the clients have zero tolerance for failure. Dom is also a wanted man, as many of his past subjects have learned what Dom has done to them. One of those subjects, Mr. Saito, offers Dom a job he can''t refuse: to take the concept one step further into inception, namely planting thoughts into the subject''s dreams without them knowing. Inception can fundamentally alter that person as a being. Saito''s target is Robert Michael Fischer, the heir to an energy business empire, which has the potential to rule the world if continued on the current trajectory. Beyond the complex logistics of the dream architecture of the case and some unknowns concerning Fischer, the biggest obstacles in success for the team become worrying about one aspect of inception which Cobb fails to disclose to the other team members prior to the job, and Cobb''s newest associate Ariadne''s belief that Cobb''s own subconscious, especially as it relates to Mal, may be taking over what happens in the dreams.'
 ]
 );
+SELECT pgml.transform(
+    inputs => ARRAY[
+        'I love how amazingly simple ML has become!',
+        'I hate doing mundane and thankless tasks. ☹️'
+    ],
+    task => '{"task": "text-classification",
+              "model": "finiteautomata/bertweet-base-sentiment-analysis"
+             }'::JSONB
+) AS positivity;
 
-SELECT pgml.load_dataset('kde4', kwargs => '{"lang1": "en", "lang2": "es"}');
-CREATE OR REPLACE VIEW kde4_en_to_es AS
-SELECT translation->>'en' AS "en", translation->>'es' AS "es"
-FROM pgml.kde4
-LIMIT 10;
-SELECT pgml.tune(
-    'Translate English to Spanish',
-    task => 'translation',
-    relation_name => 'kde4_en_to_es',
-    y_column_name => 'es', -- translate into spanish
-    model_name => 'Helsinki-NLP/opus-mt-en-es',
-    hyperparams => '{
-        "learning_rate": 2e-5,
-        "per_device_train_batch_size": 16,
-        "per_device_eval_batch_size": 16,
-        "num_train_epochs": 1,
-        "weight_decay": 0.01,
-        "max_length": 128
-    }',
-    test_size => 0.5,
-    test_sampling => 'last'
-);
+SELECT pgml.transform(
+    task => 'text-classification',
+    inputs => ARRAY[
+        'I love how amazingly simple ML has become!',
+        'I hate doing mundane and thankless tasks. ☹️'
+    ]
+) AS positivity;
+
+SELECT pgml.transform(
+    inputs => ARRAY[
+        'Stocks rallied and the British pound gained.',
+        'Stocks making the biggest moves midday: Nvidia, Palantir and more'
+    ],
+    task => '{"task": "text-classification",
+              "model": "ProsusAI/finbert"
+             }'::JSONB
+) AS market_sentiment;
 
-SELECT pgml.load_dataset('imdb');
-SELECT pgml.tune(
-    'IMDB Review Sentiment',
-    task => 'text-classification',
-    relation_name => 'pgml.imdb',
-    y_column_name => 'label',
-    model_name => 'distilbert-base-uncased',
-    hyperparams => '{
-        "learning_rate": 2e-5,
-        "per_device_train_batch_size": 16,
-        "per_device_eval_batch_size": 16,
-        "num_train_epochs": 1,
-        "weight_decay": 0.01
-    }',
-    test_size => 0.5,
-    test_sampling => 'last'
+SELECT pgml.transform(
+    inputs => ARRAY[
+        'I have a problem with my iphone that needs to be resolved asap!!'
+    ],
+    task => '{"task": "zero-shot-classification",
+              "model": "roberta-large-mnli"
+             }'::JSONB,
+    args => '{"candidate_labels": ["urgent", "not urgent", "phone", "tablet", "computer"]
+             }'::JSONB
+) AS zero_shot;
+
+SELECT pgml.transform(
+    inputs => ARRAY[
+        'Hugging Face is a French company based in New York City.'
+    ],
+    task => 'token-classification'
 );
-SELECT pgml.predict('IMDB Review Sentiment', 'I love SQL');
 
-SELECT pgml.load_dataset('squad_v2');
-SELECT pgml.tune(
-    'SQuAD Q&A v2',
+SELECT pgml.transform(
     'question-answering',
-    'pgml.squad_v2',
-    'answers',
-    'deepset/roberta-base-squad2',
-    hyperparams => '{
-        "evaluation_strategy": "epoch",
-        "learning_rate": 2e-5,
-        "per_device_train_batch_size": 16,
-        "per_device_eval_batch_size": 16,
-        "num_train_epochs": 1,
-        "weight_decay": 0.01,
-        "max_length": 384,
-        "stride": 128
-    }',
-    test_size => 11873,
-    test_sampling => 'last'
-);
+    inputs => ARRAY[
+        '{
+            "question": "Am I dreaming?",
+            "context": "I got a good nights sleep last night and started a simple tutorial over my cup of morning coffee. The capabilities seem unreal, compared to what I came to expect from the simple SQL standard I studied so long ago. The answer is staring me in the face, and I feel the uncanny call from beyond the screen to check the results."
+        }'
+    ]
+) AS answer;
 
 
-SELECT pgml.load_dataset('billsum', kwargs => '{"split": "ca_test"}');
-CREATE OR REPLACE VIEW billsum_training_data
-AS SELECT title || '\n' || text AS text, summary FROM pgml.billsum;
-SELECT pgml.tune(
-    'Legal Summarization',
-    task => 'summarization',
-    relation_name => 'billsum_training_data',
-    y_column_name => 'summary',
-    model_name => 'sshleifer/distilbart-xsum-12-1',
-    hyperparams => '{
-        "learning_rate": 2e-5,
-        "per_device_train_batch_size": 2,
-        "per_device_eval_batch_size": 2,
-        "num_train_epochs": 1,
-        "weight_decay": 0.01,
-        "max_input_length": 1024,
-        "max_summary_length": 128
-    }',
-    test_size => 0.01,
-    test_sampling => 'last'
-);

pgml-extension/src/bindings/transformers.py

Lines changed: 9 additions & 3 deletions
@@ -3,7 +3,7 @@
 import math
 import shutil
 import time
-
+import numpy as np
 
 import datasets
 from rouge import Rouge
@@ -40,6 +40,12 @@
 __cache_transformer_by_model_id = {}
 __cache_sentence_transformer_by_name = {}
 
+class NumpyJSONEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.float32):
+            return float(obj)
+        return super().default(obj)
+
 def transform(task, args, inputs):
     task = json.loads(task)
     args = json.loads(args)
@@ -50,7 +56,7 @@ def transform(task, args, inputs):
     if pipe.task == "question-answering":
         inputs = [json.loads(input) for input in inputs]
 
-    return json.dumps(pipe(inputs, **args))
+    return json.dumps(pipe(inputs, **args), cls = NumpyJSONEncoder)
 
 def embed(transformer, text, kwargs):
     kwargs = json.loads(kwargs)
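The two hunks above are the heart of the commit: values coming back from the Hugging Face pipeline can be numpy.float32 scalars, which the standard library JSON encoder refuses to serialize, so pgml.transform() failed while building its JSON result. Below is a minimal, self-contained sketch (not part of the commit) that reproduces the failure and shows how the cls hook applies the new encoder; the example payload only mimics the shape of a text-classification pipeline result, with illustrative values.

import json
import numpy as np

class NumpyJSONEncoder(json.JSONEncoder):
    # json.dumps calls default() for any object it cannot encode natively.
    def default(self, obj):
        if isinstance(obj, np.float32):
            return float(obj)
        return super().default(obj)

# Shaped like a text-classification pipeline result, with the score kept
# as a numpy scalar instead of a plain Python float.
result = [{"label": "POS", "score": np.float32(0.9987)}]

# json.dumps(result) alone raises:
#   TypeError: Object of type float32 is not JSON serializable
print(json.dumps(result, cls=NumpyJSONEncoder))  # serializes cleanly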
@@ -101,7 +107,7 @@ def tokenize_summarization(tokenizer, max_length, x, y):
     return datasets.Dataset.from_dict(encoding.data)
 
 def tokenize_text_generation(tokenizer, max_length, y):
-    encoding = tokenizer(y, max_length=max_length)
+    encoding = tokenizer(y, max_length=max_length, truncation=True, padding="max_length")
     return datasets.Dataset.from_dict(encoding.data)
 
 def tokenize_question_answering(tokenizer, max_length, x, y):
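The last hunk tightens tokenization for text-generation fine-tuning: with truncation=True and padding="max_length", every encoding comes back at exactly max_length tokens instead of varying in length. A small illustrative sketch of the two tokenizer calls (an assumption-level example, not from the commit): it needs the transformers package installed and reuses the sshleifer/distilbart-xsum-12-1 checkpoint already named in the examples above, though any tokenizer behaves the same way.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-xsum-12-1")
texts = ["a short sample", "a noticeably longer sample that will be cut down to size"]

# Without padding, the two encodings come back with different lengths
# and cannot be stacked directly into one fixed-size batch.
ragged = tokenizer(texts, max_length=8)
print([len(ids) for ids in ragged["input_ids"]])   # two different lengths

# With the flags added in this commit, every sequence is truncated or
# padded to exactly max_length, so the lengths line up.
rect = tokenizer(texts, max_length=8, truncation=True, padding="max_length")
print([len(ids) for ids in rect["input_ids"]])     # [8, 8]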

pgml-extension/tests/test.sql

Lines changed: 1 addition & 0 deletions
@@ -27,3 +27,4 @@ SELECT pgml.load_dataset('wine');
 \i examples/multi_classification.sql
 \i examples/regression.sql
 \i examples/vectors.sql
+
