
How do I fix a 404 error in Azure ML MLflow model logging (mlflow.pytorch.log_model)?

Gustavo Carmo 30 Reputation points
2025-06-29T14:45:22.5033333+00:00

I am attempting to log a model to Azure ML via MLflow and I am getting the following exception:

Exception has occurred: MlflowException
API request to endpoint /api/2.0/mlflow/logged-models failed with error code 404 != 200. Response body: ''
 File "/workspaces/Dexman/src/2025_06_29_model_logging_test.py", line 69, in <module> 

Full stack trace:

Traceback (most recent call last):
  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
    exec(code, run_globals)
  File "/root/.vscode-server/extensions/ms-python.debugpy-2025.8.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/__main__.py", line 71, in <module>
    cli.main()
  File "/root/.vscode-server/extensions/ms-python.debugpy-2025.8.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 501, in main
    run()
  File "/root/.vscode-server/extensions/ms-python.debugpy-2025.8.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 351, in run_file
    runpy.run_path(target, run_name="__main__")
  File "/root/.vscode-server/extensions/ms-python.debugpy-2025.8.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 310, in run_path
    return _run_module_code(code, init_globals, run_name, pkg_name=pkg_name, script_name=fname)
  File "/root/.vscode-server/extensions/ms-python.debugpy-2025.8.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 127, in _run_module_code
    _run_code(code, mod_globals, init_globals, mod_name, mod_spec, pkg_name, script_name)
  File "/root/.vscode-server/extensions/ms-python.debugpy-2025.8.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 118, in _run_code
    exec(code, run_globals)
  File "/workspaces/Dexman/src/2025_06_29_model_logging_test.py", line 69, in <module>
    mlflow.pytorch.log_model(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/pytorch/__init__.py", line 288, in log_model
    return Model.log(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/models/model.py", line 1161, in log
    model = mlflow.initialize_logged_model(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/tracking/fluent.py", line 2130, in initialize_logged_model
    model = _create_logged_model(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/tracking/fluent.py", line 2257, in _create_logged_model
    return MlflowClient().create_logged_model(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/tracking/client.py", line 5371, in create_logged_model
    return self._tracking_client.create_logged_model(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/tracking/_tracking_service/client.py", line 824, in create_logged_model
    return self.store.create_logged_model(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/store/tracking/rest_store.py", line 936, in create_logged_model
    response_proto = self._call_endpoint(CreateLoggedModel, req_body)
  File "/usr/local/lib/python3.10/dist-packages/mlflow/store/tracking/rest_store.py", line 135, in _call_endpoint
    return call_endpoint(
  File "/usr/local/lib/python3.10/dist-packages/mlflow/utils/rest_utils.py", line 590, in call_endpoint
    response = verify_rest_response(response, endpoint)
  File "/usr/local/lib/python3.10/dist-packages/mlflow/utils/rest_utils.py", line 310, in verify_rest_response
    raise MlflowException(
mlflow.exceptions.MlflowException: API request to endpoint /api/2.0/mlflow/logged-models failed with error code 404 != 200. Response body: ''
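
The bottom of the trace shows that mlflow.pytorch.log_model goes through mlflow.initialize_logged_model and MlflowClient().create_logged_model, which is the call that POSTs to /api/2.0/mlflow/logged-models. A minimal sketch that should hit the same endpoint directly, without the PyTorch flavor (the keyword arguments are my assumption based on the MLflow 3 client API):

import os

import mlflow
from mlflow.tracking import MlflowClient

# Assumes MLFLOW_TRACKING_URI is already set to the Azure ML workspace
# tracking URI, as in the repro code below.
mlflow.set_tracking_uri(os.environ["MLFLOW_TRACKING_URI"])
experiment = mlflow.set_experiment("mlflow-model-log-test")

client = MlflowClient()
# Same call as at the bottom of the stack trace; argument names are an assumption.
logged_model = client.create_logged_model(
    experiment_id=experiment.experiment_id,
    name="endpoint-probe",
)
print(logged_model)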

Here is the repro code:

#####################################################################
import os

import torch
import torch.nn as nn

import mlflow
import mlflow.pytorch
import mlflow.models

from azure.ai.ml import MLClient
from azure.identity import AzureCliCredential

#####################################################################
class SimpleNet(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(SimpleNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

#####################################################################
print("---- Dummy PyTorch model --")
input_dim = 10
hidden_dim = 5
output_dim = 1
dummy_model = SimpleNet(input_dim, hidden_dim, output_dim)
print(dummy_model)

dummy_input = torch.randn(1, input_dim) # Batch size of 1, input_dim features

print("---- Azure Machine Learning Workspace Information --")
subscription_id="REDACTED"
resource_group="westus3"
workspace_name="REDACTED"

ml_client = MLClient(credential=AzureCliCredential(),
                     subscription_id=subscription_id, 
                     resource_group_name=resource_group,
                     workspace_name=workspace_name)

workspace = ml_client.workspaces.get(name=workspace_name)
print(f"Successfully connected to Azure ML Workspace: {workspace.name} in region {workspace.location}")

os.environ["MLFLOW_TRACKING_URI"] = workspace.mlflow_tracking_uri
print(f"MLflow Tracking URI set to: {os.environ.get('MLFLOW_TRACKING_URI')}")
    
experiment_name = 'mlflow-model-log-test'
mlflow.set_experiment(experiment_name)
mlflow.autolog()
mlflow.pytorch.autolog(log_models=True)

with mlflow.start_run(run_name="mlflow-model-log-test-run") as run:
    print(f"MLflow Run ID: {run.info.run_id}")

    mlflow.log_param("input_size", input_dim)
    mlflow.log_param("hidden_size", hidden_dim)
    mlflow.log_param("output_size", output_dim)

    with torch.no_grad():
        dummy_output = dummy_model(dummy_input)

    signature = mlflow.models.infer_signature(dummy_input.numpy(), dummy_output.numpy())

    mlflow.pytorch.log_model(
        pytorch_model=dummy_model,
        name="simple_pytorch_model",
        signature=signature,
        input_example=dummy_input.numpy()
    )

    print("Dummy PyTorch model logged to MLflow successfully!")

Here is the output of pip freeze:

adal==1.2.7
aiosignal==1.3.2
alembic==1.15.2
annotated-types==0.7.0
anyio==4.9.0
argcomplete==3.6.2
asgiref==3.8.1
attrs==25.3.0
azure-ai-ml==1.27.1
azure-common==1.1.28
azure-core==1.33.0
azure-core-tracing-opentelemetry==1.0.0b12
azure-graphrbac==0.61.2
azure-identity==1.21.0
azure-mgmt-authorization==4.0.0
azure-mgmt-containerregistry==10.3.0
azure-mgmt-core==1.5.0
azure-mgmt-keyvault==10.3.1
azure-mgmt-network==28.1.0
azure-mgmt-resource==23.3.0
azure-mgmt-storage==22.2.0
azure-monitor-opentelemetry==1.6.8
azure-monitor-opentelemetry-exporter==1.0.0b36
azure-storage-blob==12.19.0
azure-storage-file-datalake==12.14.0
azure-storage-file-share==12.21.0
azureml-core==1.60.0
azureml-dataprep==5.1.6
azureml-dataprep-native==41.0.0
azureml-dataprep-rslex==2.22.5
azureml-fsspec==1.3.1
azureml-mlflow==1.60.0
backports.tempfile==1.0
backports.weakref==1.0.post1
bcrypt==4.3.0
blinker==1.9.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
cloudpickle==2.2.1
colorama==0.4.6
contextlib2==21.6.0
contourpy==1.3.2
cryptography==44.0.2
cycler==0.12.1
Cython==3.0.12
databricks-sdk==0.50.0
Deprecated==1.2.18
docker==7.1.0
exceptiongroup==1.3.0
fastapi==0.115.12
ffmpeg-python==0.2.0
filelock==3.18.0
fixedint==0.1.6
Flask==3.1.0
fonttools==4.57.0
frozenlist==1.6.0
fsspec==2023.10.0
future==1.0.0
gitdb==4.0.12
GitPython==3.1.44
google-auth==2.39.0
graphene==3.4.3
graphql-core==3.2.6
graphql-relay==3.2.0
greenlet==3.2.1
gunicorn==23.0.0
h11==0.14.0
humanfriendly==10.0
idna==3.10
imageio==2.37.0
importlib_metadata==8.6.1
isodate==0.7.2
itsdangerous==2.2.0
jeepney==0.9.0
Jinja2==3.1.6
jmespath==1.0.1
joblib==1.4.2
jsonpickle==4.0.5
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
kiwisolver==1.4.8
knack==0.12.0
lazy_loader==0.4
Mako==1.3.10
Markdown==3.8
MarkupSafe==3.0.2
marshmallow==3.26.1
matplotlib==3.10.1
mldesigner==0.1.0b19
mlflow==3.1.1
mlflow-skinny==3.1.1
mltable==1.6.1
mpmath==1.3.0
msal==1.32.0
msal-extensions==1.3.1
msgpack==1.1.0
msrest==0.7.1
msrestazure==0.6.4.post1
ndg-httpsclient==0.5.1
networkx==3.4.2
numpy==2.2.6
nvidia-cublas-cu11==11.11.3.6
nvidia-cublas-cu12==12.4.5.8
nvidia-cuda-cupti-cu11==11.8.87
nvidia-cuda-cupti-cu12==12.4.127
nvidia-cuda-nvrtc-cu11==11.8.89
nvidia-cuda-nvrtc-cu12==12.4.127
nvidia-cuda-runtime-cu11==11.8.89
nvidia-cuda-runtime-cu12==12.4.127
nvidia-cudnn-cu11==9.1.0.70
nvidia-cudnn-cu12==9.1.0.70
nvidia-cufft-cu11==10.9.0.58
nvidia-cufft-cu12==11.2.1.3
nvidia-curand-cu11==10.3.0.86
nvidia-curand-cu12==10.3.5.147
nvidia-cusolver-cu11==11.4.1.48
nvidia-cusolver-cu12==11.6.1.9
nvidia-cusparse-cu11==11.7.5.86
nvidia-cusparse-cu12==12.3.1.170
nvidia-cusparselt-cu12==0.6.2
nvidia-nccl-cu11==2.21.5
nvidia-nccl-cu12==2.21.5
nvidia-nvjitlink-cu12==12.4.127
nvidia-nvtx-cu11==11.8.86
nvidia-nvtx-cu12==12.4.127
oauthlib==3.2.2
opencv-python==4.11.0.86
opentelemetry-api==1.31.1
opentelemetry-instrumentation==0.52b1
opentelemetry-instrumentation-asgi==0.52b1
opentelemetry-instrumentation-dbapi==0.52b1
opentelemetry-instrumentation-django==0.52b1
opentelemetry-instrumentation-fastapi==0.52b1
opentelemetry-instrumentation-flask==0.52b1
opentelemetry-instrumentation-psycopg2==0.52b1
opentelemetry-instrumentation-requests==0.52b1
opentelemetry-instrumentation-urllib==0.52b1
opentelemetry-instrumentation-urllib3==0.52b1
opentelemetry-instrumentation-wsgi==0.52b1
opentelemetry-resource-detector-azure==0.1.5
opentelemetry-sdk==1.31.1
opentelemetry-semantic-conventions==0.52b1
opentelemetry-util-http==0.52b1
packaging==25.0
pandas==2.3.0
paramiko==3.5.1
pathspec==0.12.1
pillow==11.2.1
pkginfo==1.12.1.2
protobuf==5.29.4
psutil==6.1.1
pyarrow==19.0.1
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
pydantic==2.11.3
pydantic_core==2.33.1
pydash==8.0.5
Pygments==2.19.2
PyJWT==2.10.1
PyNaCl==1.5.0
pyOpenSSL==25.0.0
pyparsing==3.2.3
PySocks==1.7.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ray==2.31.0
referencing==0.36.2
requests==2.32.3
requests-oauthlib==2.0.0
rpds-py==0.24.0
rsa==4.9.1
scikit-image==0.25.2
scikit-learn==1.6.1
scipy==1.15.2
SecretStorage==3.3.3
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
SQLAlchemy==2.0.40
sqlparse==0.5.3
starlette==0.46.2
strictyaml==1.7.3
sympy==1.13.1
tabulate==0.9.0
threadpoolctl==3.6.0
tifffile==2025.3.30
torch==2.6.0
tqdm==4.67.1
triton==3.2.0
typing-inspection==0.4.0
typing_extensions==4.14.0
tzdata==2025.2
urllib3==2.4.0
uvicorn==0.34.2
Werkzeug==3.1.3
wrapt==1.17.2
zipp==3.21.0
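
The same versions can also be confirmed at runtime from inside the failing script (a small sketch using importlib.metadata):

from importlib.metadata import version

import mlflow

# Print the packages most relevant to the tracking call, plus the active tracking URI.
for pkg in ("mlflow", "mlflow-skinny", "azureml-mlflow", "azure-ai-ml", "torch"):
    print(pkg, version(pkg))
print("tracking URI:", mlflow.get_tracking_uri())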

This is running against westus3, and the workspace creation info is:

    "systemData": {
        "createdAt": "2024-05-14T14:41:29.8913557Z",
        "createdBy": "0736f41a-0425-4b46-bdb5-1563eff02385",
        "createdByType": "Application",
        "lastModifiedAt": "2024-05-14T14:41:29.8913557Z",
        "lastModifiedBy": "0736f41a-0425-4b46-bdb5-1563eff02385",
        "lastModifiedByType": "Application"
    },

Can you please help me diagnose this issue?

Thanks a lot
