Replace at -> pt. (pymc-devs#485)
* Replace at -> pt.

* Rename two remaining at->pt.

* Add pytensor to intersphinx.

* Add pytensor to pre-commit.

* remove aesara from intersphinx mapping

Co-authored-by: Oriol (ZBook) <oriol.abril.pla@gmail.com>
twiecki and OriolAbril authored Dec 28, 2022
1 parent 5238dc3 commit ff67bea
Showing 48 changed files with 284 additions and 285 deletions.
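Before the per-file diffs, the substance of the change in one sketch: every notebook swaps the Aesara-era alias for PyTensor's `pt` convention, with no change to model logic:

```python
# Old alias (removed in this commit):
import pytensor.tensor as at

# New alias (used throughout pymc-examples after this commit):
import pytensor.tensor as pt
```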
3 changes: 1 addition & 2 deletions .pre-commit-config.yaml
@@ -83,15 +83,14 @@ repos:
examples/howto/custom_distribution.ipynb)
entry: >
(?x)(arviz-devs.github.io|
- aesara.readthedocs.io|
- aeppl.readthedocs.io|
pymc-experimental.readthedocs.io|
docs.pymc.io|
numpy.org/doc|
pymc-examples.readthedocs.io|
docs.python.org|
xarray.pydata.org|
python.arviz.org|
+ pytensor.readthedocs.io|
docs.xarray.dev|
www.pymc.io|
docs.scipy.org/doc)
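The intersphinx changes named in the commit message live in a Sphinx conf.py that is not reproduced in this excerpt; a minimal sketch of what that change plausibly looks like (the dictionary key and URL path are assumptions, not taken from the diff):

```python
# Hypothetical conf.py excerpt -- the real file is among the 48 changed
# files but is not shown above.
intersphinx_mapping = {
    "pytensor": ("https://pytensor.readthedocs.io/en/latest/", None),
    # the "aesara" entry was removed, per the commit message
}
```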
2 changes: 1 addition & 1 deletion examples/case_studies/GEV.ipynb
@@ -50,7 +50,7 @@
"import numpy as np\n",
"import pymc as pm\n",
"import pymc_experimental.distributions as pmx\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"\n",
"from arviz.plots import plot_utils as azpu"
]
2 changes: 1 addition & 1 deletion examples/case_studies/GEV.myst.md
@@ -45,7 +45,7 @@ import matplotlib.pyplot as plt
import numpy as np
import pymc as pm
import pymc_experimental.distributions as pmx
- import pytensor.tensor as at
+ import pytensor.tensor as pt
from arviz.plots import plot_utils as azpu
```
34 changes: 17 additions & 17 deletions examples/case_studies/binning.ipynb
@@ -72,7 +72,7 @@
"We are now in a position to sketch out a generative PyMC model:\n",
"\n",
"```python\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"\n",
"with pm.Model() as model:\n",
" # priors\n",
@@ -81,7 +81,7 @@
" # generative process\n",
" probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), cutpoints))\n",
" probs = pm.math.concatenate([[0], probs, [1]])\n",
" probs = at.extra_ops.diff(probs)\n",
" probs = pt.extra_ops.diff(probs)\n",
" # likelihood\n",
" pm.Multinomial(\"counts\", p=probs, n=sum(counts), observed=counts)\n",
"```\n",
@@ -98,7 +98,7 @@
"simply concatenates the cumulative density at $-\\infty$ (which is zero) and at $\\infty$ (which is 1).\n",
"The third line\n",
"```python\n",
"probs = at.extra_ops.diff(probs)\n",
"probs = pt.extra_ops.diff(probs)\n",
"```\n",
"calculates the difference between consecutive cumulative densities to give the actual probability of a datum falling in any given bin.\n",
"\n",
@@ -125,7 +125,7 @@
"import numpy as np\n",
"import pandas as pd\n",
"import pymc as pm\n",
"import pytensor.tensor as at\n",
"import pytensor.tensor as pt\n",
"import seaborn as sns\n",
"\n",
"warnings.filterwarnings(action=\"ignore\", category=UserWarning)"
@@ -320,7 +320,7 @@
" mu = pm.Normal(\"mu\")\n",
"\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)"
]
},
@@ -841,7 +841,7 @@
" mu = pm.Normal(\"mu\")\n",
"\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))\n",
" pm.Multinomial(\"counts2\", p=probs2, n=c2.sum(), observed=c2.values)"
]
},
@@ -1238,11 +1238,11 @@
" mu = pm.Normal(\"mu\")\n",
"\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1)\n",
"\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2)\n",
"\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n",
@@ -1719,7 +1719,7 @@
" mu = pm.Normal(\"mu\")\n",
" # study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1)\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n",
" # study 2\n",
@@ -2149,12 +2149,12 @@
"\n",
" # Study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims=\"bin1\")\n",
"\n",
" # Study 2\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims=\"bin2\")\n",
"\n",
" # Likelihood\n",
@@ -2392,12 +2392,12 @@
"\n",
" # Study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims=\"bin1\")\n",
"\n",
" # Study 2\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims=\"bin2\")\n",
"\n",
" # Likelihood\n",
@@ -2927,12 +2927,12 @@
" \n",
" # Study 1\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"normal1_cdf\", probs1, dims='bin1')\n",
"\n",
" # Study 2\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"normal2_cdf\", probs2, dims='bin2')\n",
"\n",
" # Likelihood\n",
@@ -3091,11 +3091,11 @@
" beta = pm.HalfNormal(\"beta\", 10)\n",
"\n",
" probs1 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d1))\n",
" probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))\n",
" probs1 = pm.Deterministic(\"gumbel_cdf1\", probs1)\n",
"\n",
" probs2 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d2))\n",
" probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))\n",
" probs2 = pm.Deterministic(\"gumbel_cdf2\", probs2)\n",
"\n",
" pm.Multinomial(\"counts1\", p=probs1, n=c1.sum(), observed=c1.values)\n",
34 changes: 17 additions & 17 deletions examples/case_studies/binning.myst.md
@@ -69,7 +69,7 @@ In ordinal regression, the cutpoints are treated as latent variables and the par
We are now in a position to sketch out a generative PyMC model:

```python
- import pytensor.tensor as at
+ import pytensor.tensor as pt

with pm.Model() as model:
# priors
@@ -78,7 +78,7 @@ with pm.Model() as model:
# generative process
probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), cutpoints))
probs = pm.math.concatenate([[0], probs, [1]])
- probs = at.extra_ops.diff(probs)
+ probs = pt.extra_ops.diff(probs)
# likelihood
pm.Multinomial("counts", p=probs, n=sum(counts), observed=counts)
```
@@ -95,7 +95,7 @@ probs = pm.math.concatenate([[0], probs, [1]])
simply concatenates the cumulative density at $-\infty$ (which is zero) and at $\infty$ (which is 1).
The third line
```python
- probs = at.extra_ops.diff(probs)
+ probs = pt.extra_ops.diff(probs)
```
calculates the difference between consecutive cumulative densities to give the actual probability of a datum falling in any given bin.
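As a quick standalone check of this exp(logcdf) → concatenate → diff pipeline, here is a minimal sketch; the mu, sigma, and cutpoint values are illustrative, not taken from the notebook:

```python
import numpy as np
import pymc as pm
import pytensor.tensor as pt

# interior bin edges (illustrative values)
cutpoints = np.array([-1.0, 0.0, 1.0])

# cumulative density of a standard normal at each cutpoint
probs = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=0.0, sigma=1.0), cutpoints))

# pad with the CDF at -infinity (0) and +infinity (1), then take
# consecutive differences to recover the per-bin probabilities
probs = pm.math.concatenate([[0], probs, [1]])
probs = pt.extra_ops.diff(probs)

print(probs.eval())  # approx. [0.159, 0.341, 0.341, 0.159], summing to 1
```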

@@ -115,7 +115,7 @@ import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc as pm
- import pytensor.tensor as at
+ import pytensor.tensor as pt
import seaborn as sns
warnings.filterwarnings(action="ignore", category=UserWarning)
@@ -226,7 +226,7 @@ with pm.Model() as model1:
mu = pm.Normal("mu")
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([[0], probs1, [1]]))
pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
```

@@ -331,7 +331,7 @@ with pm.Model() as model2:
mu = pm.Normal("mu")
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))
- probs2 = at.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))
+ probs2 = pt.extra_ops.diff(pm.math.concatenate([[0], probs2, [1]]))
pm.Multinomial("counts2", p=probs2, n=c2.sum(), observed=c2.values)
```

@@ -426,11 +426,11 @@ with pm.Model() as model3:
mu = pm.Normal("mu")
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1)
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d2))
- probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+ probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2)
pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
@@ -519,7 +519,7 @@ with pm.Model() as model4:
mu = pm.Normal("mu")
# study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu, sigma=sigma), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1)
pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)
# study 2
@@ -612,12 +612,12 @@ with pm.Model(coords=coords) as model5:
# Study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1, dims="bin1")
# Study 2
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))
- probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+ probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2, dims="bin2")
# Likelihood
@@ -645,12 +645,12 @@ with pm.Model(coords=coords) as model5:
# Study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1, dims="bin1")
# Study 2
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))
- probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+ probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2, dims="bin2")
# Likelihood
@@ -748,12 +748,12 @@ with pm.Model(coords=coords) as model5:

# Study 1
probs1 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[0], sigma=sigma[0]), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("normal1_cdf", probs1, dims='bin1')

# Study 2
probs2 = pm.math.exp(pm.logcdf(pm.Normal.dist(mu=mu[1], sigma=sigma[1]), d2))
- probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+ probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("normal2_cdf", probs2, dims='bin2')

# Likelihood
@@ -855,11 +855,11 @@ with pm.Model() as model6:
beta = pm.HalfNormal("beta", 10)
probs1 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d1))
- probs1 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
+ probs1 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs1, np.array([1])]))
probs1 = pm.Deterministic("gumbel_cdf1", probs1)
probs2 = pm.math.exp(pm.logcdf(pm.Gumbel.dist(mu=mu, beta=beta), d2))
- probs2 = at.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
+ probs2 = pt.extra_ops.diff(pm.math.concatenate([np.array([0]), probs2, np.array([1])]))
probs2 = pm.Deterministic("gumbel_cdf2", probs2)
pm.Multinomial("counts1", p=probs1, n=c1.sum(), observed=c1.values)