
Commit

ipynb to py conversion process added
arun477 committed Sep 13, 2023
1 parent 0bbec58 commit 53075d2
Showing 2 changed files with 27 additions and 152 deletions.
33 changes: 26 additions & 7 deletions mnist.ipynb
@@ -145,6 +145,7 @@
"metadata": {},
"outputs": [],
"source": [
"# model definition\n",
"def linear_classifier():\n",
" return nn.Sequential(\n",
" Reshape((-1, 784)),\n",
@@ -209,7 +210,11 @@
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"metadata": {
"tags": [
"exclude"
]
},
"outputs": [],
"source": [
"# with open('./linear_classifier.pkl', 'wb') as model_file:\n",
@@ -218,33 +223,47 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"tags": [
"exclude"
]
},
"source": [
"#### commit to .py file for deployment"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"execution_count": 43,
"metadata": {
"tags": [
"exclude"
]
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[NbConvertApp] Converting notebook mnist.ipynb to script\n",
"[NbConvertApp] Writing 6388 bytes to mnist.py\n"
"[NbConvertApp] Writing 3123 bytes to mnist.py\n"
]
}
],
"source": [
"!jupyter nbconvert --to script mnist.ipynb"
"# !jupyter nbconvert --to script mnist.ipynb\n",
"!jupyter nbconvert --to script --TagRemovePreprocessor.remove_cell_tags=\"exclude\" --TemplateExporter.exclude_input_prompt=True mnist.ipynb\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": [
"exclude"
]
},
"outputs": [],
"source": [
"# from IPython.display import HTML, display, Image\n",
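The export step above uses nbconvert's TagRemovePreprocessor: every cell whose metadata carries the tag "exclude" (the pickling, conversion, and HTML-widget cells in this diff) is dropped from the generated script, and --TemplateExporter.exclude_input_prompt=True strips the "# In[n]:" execution markers. The same conversion can be driven from Python; the sketch below is not part of this commit, only one way to reproduce the CLI call through nbconvert's programmatic API (assuming a reasonably recent nbconvert):

# Sketch only: programmatic equivalent of the CLI call in the notebook above.
# Assumes the cells to drop are tagged "exclude" in their metadata.
from traitlets.config import Config
from nbconvert import ScriptExporter
from nbconvert.preprocessors import TagRemovePreprocessor

cfg = Config()
cfg.TagRemovePreprocessor.remove_cell_tags = ("exclude",)  # drop tagged cells
cfg.TagRemovePreprocessor.enabled = True
cfg.TemplateExporter.exclude_input_prompt = True           # no "# In[n]:" markers

exporter = ScriptExporter(config=cfg)
exporter.register_preprocessor(TagRemovePreprocessor(config=cfg), enabled=True)

source, _resources = exporter.from_filename("mnist.ipynb")
with open("mnist.py", "w") as f:
    f.write(source)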
146 changes: 1 addition & 145 deletions mnist.py
@@ -1,9 +1,6 @@
#!/usr/bin/env python
# coding: utf-8

# In[1]:


import torch
from torch import nn
import torch.nn.functional as F
@@ -19,17 +16,11 @@
plt.rcParams['figure.figsize'] = [2, 2]


# In[2]:


dataset_nm = 'mnist'
x,y = 'image', 'label'
ds = load_dataset(dataset_nm)


# In[3]:


def transform_ds(b):
b[x] = [TF.to_tensor(ele) for ele in b[x]]
return b
@@ -38,9 +29,6 @@ def transform_ds(b):
plt.imshow(dst['train'][0]['image'].permute(1,2,0));


# In[4]:


bs = 1024
class DataLoaders:
def __init__(self, train_ds, valid_ds, bs, collate_fn, **kwargs):
@@ -56,9 +44,6 @@ def collate_fn(b):
xb.shape, yb.shape


# In[5]:


class Reshape(nn.Module):
def __init__(self, dim):
super().__init__()
@@ -68,9 +53,6 @@ def forward(self, x):
return x.reshape(self.dim)


# In[6]:


def cnn_classifier():
ks,stride = 3,2
return nn.Sequential(
@@ -89,9 +71,7 @@ def cnn_classifier():
)


# In[7]:


# model definition
def linear_classifier():
return nn.Sequential(
Reshape((-1, 784)),
@@ -103,9 +83,6 @@ def linear_classifier():
)


# In[8]:


model = linear_classifier()
lr = 0.1
max_lr = 0.1
@@ -133,128 +110,7 @@ def linear_classifier():



# In[31]:


# with open('./linear_classifier.pkl', 'wb') as model_file:
# pickle.dump(model, model_file)


# #### commit to .py file for deployment

# In[32]:


get_ipython().system('jupyter nbconvert --to script mnist.ipynb')


# In[29]:


get_ipython().system('pip3 install markupsafe')



# In[ ]:


# from IPython.display import HTML, display, Image
# %%html
# <style>
# #whiteboard {
# border: 3px solid black;
# border-radius: 6px;
# background-color: #FFFFFF;
# }
# #capture-button {
# background-color: #3F52D9;
# color: white;
# border: none;
# padding: 10px 20px;
# cursor: pointer;
# font-size: 16px;
# border-radius: 3px;
# margin-top: 10px;
# width: 190px;
# margin-right: 20px;
# }
# #clear-button {
# background-color: #FF0000,;
# color: black;
# border: none;
# padding: 10px 20px;
# cursor: pointer;
# font-size: 16px;
# border-radius: 3px;
# margin-top: 10px;
# width: 190px;
# }
# #container {
# display: flex;
# flex-direction: column; /* Arrange children vertically */
# align-items: center; /* Center horizontally */
# justify-content: center;
# }
# #btn-container {
# display: flex;
# flex-direction: row; /* Arrange children vertically */
# align-items: center; /* Center horizontally */
# }

# </style>
# <div id='container'>
# <canvas id="whiteboard" width="400" height="200" fill_rect='white'></canvas>
# <div id='btn-container'>
# <button id="capture-button">Predict</button>
# <button id="clear-button">Clear</button>
# </div>

# </div>
# <script>
# var canvas = document.getElementById('whiteboard');
# var context = canvas.getContext('2d');
# var drawing = false;
# canvas.addEventListener('mousedown', function (e) {
# drawing = true;
# context.beginPath();
# context.moveTo(e.clientX - canvas.getBoundingClientRect().left, e.clientY - canvas.getBoundingClientRect().top);
# });
# canvas.addEventListener('mousemove', function (e) {
# if (drawing) {
# context.lineTo(e.clientX - canvas.getBoundingClientRect().left, e.clientY - canvas.getBoundingClientRect().top);
# context.stroke();
# }
# });
# canvas.addEventListener('mouseup', function () {
# drawing = false;
# });
# canvas.addEventListener('mouseout', function () {
# drawing = false;
# });

# var clearButton = document.getElementById('clear-button');
# clearButton.addEventListener('click', function () {
# context.clearRect(0, 0, canvas.width, canvas.height);
# });

# var captureButton = document.getElementById('capture-button');
# captureButton.addEventListener('click', function () {
# // Convert the canvas content to a data URL (image)
# var imageData = canvas.toDataURL("image/png");

# // Send the image data to the Jupyter kernel variable
# IPython.notebook.kernel.execute('image_data = "' + imageData + '"');
# });
# </script>


# In[ ]:





# In[ ]:



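After this commit, the exported mnist.py contains only the untagged cells, with no "# In[n]:" markers and no get_ipython() shell calls, so it can run as a plain script. A small sanity check like the one below (an assumption for illustration, not part of the commit) can guard against notebook-only code leaking back into the export:

# Sketch only: post-export check, assuming mnist.py sits next to the notebook.
import ast

with open("mnist.py") as f:
    src = f.read()

ast.parse(src)  # raises SyntaxError if the export is not valid Python
assert "get_ipython(" not in src, "notebook-only cells leaked into mnist.py"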
