TL;DR Link to heading
This is a deep dive into the Xilinx ZynqMP DisplayPort DRM driver.
DRM is the Linux kernel's replacement for the legacy framebuffer (fbdev) interface,
with ioctl interfaces to user space. As mentioned in the documentation, hardware-specific drivers register themselves with DRM, and DRM handles the ioctl plumbing on their behalf.
The Direct Rendering Manager (DRM) is a subsystem of the Linux kernel responsible for interfacing with GPUs of modern video cards.
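To make the ioctl side concrete, here is a minimal user-space sketch (not part of the driver) that opens a DRM device node and asks the driver for its name via the standard DRM_IOCTL_VERSION ioctl. The device path is an assumption; on a ZynqMP board the DP subsystem typically shows up as /dev/dri/card0.
/* User-space sketch: query the DRM driver name over ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>
int main(void)
{
	char name[64] = "";
	struct drm_version v = { .name = name, .name_len = sizeof(name) - 1 };
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed device node */
	if (fd < 0)
		return 1;
	/* DRM_IOCTL_VERSION fills in the driver name, version and date. */
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
		printf("DRM driver: %s\n", name);
	close(fd);
	return 0;
}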
DisplayPort subsystem driver Link to heading
We start with the platform driver for the DisplayPort subsystem, defined in drivers/gpu/drm/xlnx/zynqmp_dpsub.c. The driver registers zynqmp_dpsub_probe and its other ops.
static const struct of_device_id zynqmp_dpsub_of_match[] = {
{ .compatible = "xlnx,zynqmp-dpsub-1.7", },
{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
static struct platform_driver zynqmp_dpsub_driver = {
.probe = zynqmp_dpsub_probe,
.remove_new = zynqmp_dpsub_remove,
.shutdown = zynqmp_dpsub_shutdown,
.driver = {
.name = "zynqmp-dpsub",
.pm = &zynqmp_dpsub_pm_ops,
.of_match_table = zynqmp_dpsub_of_match,
},
};
drm_module_platform_driver(zynqmp_dpsub_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
MODULE_LICENSE("GPL v2");
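drm_module_platform_driver() is a DRM helper macro (include/drm/drm_module.h) that generates the usual module init/exit boilerplate, with one twist: registration is skipped when DRM drivers are disabled on the kernel command line (nomodeset). Roughly, and this is a simplified sketch rather than the exact expansion, it behaves like:
#include <drm/drm_drv.h>
#include <linux/module.h>
#include <linux/platform_device.h>
static int __init zynqmp_dpsub_driver_init(void)
{
	/* Respect nomodeset / "firmware drivers only" mode. */
	if (drm_firmware_drivers_only())
		return -ENODEV;
	return platform_driver_register(&zynqmp_dpsub_driver);
}
module_init(zynqmp_dpsub_driver_init);
static void __exit zynqmp_dpsub_driver_exit(void)
{
	platform_driver_unregister(&zynqmp_dpsub_driver);
}
module_exit(zynqmp_dpsub_driver_exit);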
zynqmp_dpsub_probe does several things, starting with allocating the DisplayPort subsystem private data for the platform device.
static int zynqmp_dpsub_probe(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub;
/* Allocate private data. */
dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL);
Then comes some initialization code for the DP subsystem, with calls to initialize the clocks and parse the device tree.
ret = zynqmp_dpsub_init_clocks(dpsub);
if (ret < 0)
goto err_mem;
ret = zynqmp_dpsub_parse_dt(dpsub);
if (ret < 0)
goto err_mem;
pm_runtime_enable(&pdev->dev);
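zynqmp_dpsub_init_clocks is not shown here. As a sketch of what such a helper typically does, it looks up the subsystem clocks by name through the common clock framework and enables the ones the register interface needs. The clock name below is an assumption based on the device tree binding, not a quote from the driver.
#include <linux/clk.h>
/* Illustrative only: acquire and enable one named clock. */
static int example_init_clocks(struct device *dev, struct clk **apb_clk)
{
	*apb_clk = devm_clk_get(dev, "dp_apb_clk"); /* assumed clock name */
	if (IS_ERR(*apb_clk))
		return dev_err_probe(dev, PTR_ERR(*apb_clk),
				     "failed to get APB clock\n");
	/* Keep the register interface clocked while the driver is bound. */
	return clk_prepare_enable(*apb_clk);
}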
Finally, there are two probe calls, one for the DP driver and one for the display driver.
ret = zynqmp_dp_probe(dpsub);
ret = zynqmp_disp_probe(dpsub);
if (dpsub->dma_enabled) {
ret = zynqmp_dpsub_drm_init(dpsub);
if (ret)
goto err_disp;
}
DisplayPort driver Link to heading
Let’s have a look at the DisplayPort driver first. In drivers/gpu/drm/xlnx/zynqmp_dp.c, zynqmp_dp_probe creates the zynqmp_dp structure and performs the hardware reset and initialization.
int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct drm_bridge *bridge;
struct zynqmp_dp *dp;
struct resource *res;
int ret;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
One of the resources the DP driver claims is its register space, the "dp" memory resource, which it maps into the kernel address space.
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
dp->iomem = devm_ioremap_resource(dp->dev, res);
The driver uses the kernel reset subsystem. I will circle back to it when I have time.
dp->reset = devm_reset_control_get(dp->dev, NULL);
ret = zynqmp_dp_reset(dp, true);
ret = zynqmp_dp_reset(dp, false);
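zynqmp_dp_reset itself is not shown; conceptually it just asserts or deasserts the subsystem's reset line through the reset-controller API. A minimal sketch (the real function may add delays or status polling):
#include <linux/reset.h>
static int example_dp_reset(struct reset_control *rstc, bool assert)
{
	if (assert)
		return reset_control_assert(rstc);
	return reset_control_deassert(rstc);
}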
zynqmp_dp_phy_probe uses a devres-managed lookup with the dp-phy names.
ret = zynqmp_dp_phy_probe(dp);
/**
* zynqmp_dp_phy_probe - Probe the PHYs
* @dp: DisplayPort IP core structure
*
* Probe PHYs for all lanes. Less PHYs may be available than the number of
* lanes, which is not considered an error as long as at least one PHY is
* found. The caller can check dp->num_lanes to check how many PHYs were found.
*
* Return:
* * 0 - Success
* * -ENXIO - No PHY found
* * -EPROBE_DEFER - Probe deferral requested
* * Other negative value - PHY retrieval failure
*/
static int zynqmp_dp_phy_probe(struct zynqmp_dp *dp)
{
unsigned int i;
for (i = 0; i < ZYNQMP_DP_MAX_LANES; i++) {
char phy_name[16];
struct phy *phy;
snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
phy = devm_phy_get(dp->dev, phy_name);
dp->phy[i] = phy;
dp->num_lanes++;
}
return 0;
}
struct phy *devm_phy_get(struct device *dev, const char *string)
{
struct phy **ptr, *phy;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
phy = phy_get(dev, string);
if (!IS_ERR(phy)) {
*ptr = phy;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);
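The release callback registered with devres_alloc() is what makes this managed: when the device is unbound, the driver core walks the devres list and invokes it for every entry. For devm_phy_get() it presumably just drops the PHY reference, along these lines:
/* Presumed devres release callback paired with devm_phy_get(). */
static void devm_phy_release(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_put(dev, phy);
}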
In the device tree, two dp-phy entries are defined.
phys = <&lane1>, <&lane0>;
phy-names = "dp-phy0", "dp-phy1";
power-domains = <&pd_dp>;
vid-layer {
dma-names = "vid0", "vid1", "vid2";
dmas = <&xlnx_dpdma 0>,
<&xlnx_dpdma 1>,
<&xlnx_dpdma 2>;
};
Hardware initialization writes some configuration to registers with zynqmp_dp_write.
/* Initialize the hardware. */
dp->config.misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK;
zynqmp_dp_set_format(dp, NULL, ZYNQMP_DPSUB_FORMAT_RGB, 8);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
zynqmp_dp_set(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
zynqmp_dp_write(dp, ZYNQMP_DP_FORCE_SCRAMBLER_RESET, 1);
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
ret = zynqmp_dp_phy_init(dp);
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 1);
static void zynqmp_dp_write(struct zynqmp_dp *dp, int offset, u32 val)
{
writel(val, dp->iomem + offset);
}
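zynqmp_dp_set, used in the initialization sequence above, is not shown; given zynqmp_dp_write it is presumably a read-modify-write built on a matching readl helper:
/* Presumed companions to zynqmp_dp_write(). */
static u32 zynqmp_dp_read(struct zynqmp_dp *dp, int offset)
{
	return readl(dp->iomem + offset);
}
static void zynqmp_dp_set(struct zynqmp_dp *dp, int offset, u32 set)
{
	zynqmp_dp_write(dp, offset, zynqmp_dp_read(dp, offset) | set);
}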
At the end of zynqmp_dp_probe, it requests the IRQ and sets dpsub->dp to point to the DP device.
dp->irq = platform_get_irq(pdev, 0);
...
...
/*
* Now that the hardware is initialized and won't generate spurious
* interrupts, request the IRQ.
*/
ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL,
zynqmp_dp_irq_handler, IRQF_ONESHOT,
dev_name(dp->dev), dp);
dpsub->dp = dp;
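Passing a NULL primary handler to devm_request_threaded_irq() means the interrupt is serviced entirely from a kernel thread, which is why IRQF_ONESHOT is required. A generic skeleton of such a handler is sketched below; the register name and dispatch logic are illustrative, not the actual zynqmp_dp_irq_handler.
#include <linux/interrupt.h>
/* Illustrative threaded-IRQ skeleton, not the real handler. */
static irqreturn_t example_dp_irq_handler(int irq, void *data)
{
	struct zynqmp_dp *dp = data;
	u32 status = zynqmp_dp_read(dp, ZYNQMP_DP_INT_STATUS); /* assumed register */
	if (!status)
		return IRQ_NONE;
	/* Acknowledge, then dispatch on the status bits (HPD, vblank, AUX). */
	zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
	return IRQ_HANDLED;
}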
DisplayPort display driver Link to heading
drivers/gpu/drm/xlnx/zynqmp_disp.c
int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct zynqmp_disp *disp;
int ret;
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
disp->dev = &pdev->dev;
disp->dpsub = dpsub;
disp->blend.base = devm_platform_ioremap_resource_byname(pdev, "blend");
disp->avbuf.base = devm_platform_ioremap_resource_byname(pdev, "av_buf");
disp->audio.base = devm_platform_ioremap_resource_byname(pdev, "aud");
ret = zynqmp_disp_create_layers(disp);
...
...
dpsub->disp = disp;
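devm_platform_ioremap_resource_byname() is just a convenience wrapper around the two-step pattern zynqmp_dp_probe used earlier (platform_get_resource_byname() followed by devm_ioremap_resource()), roughly:
/* Rough equivalent of devm_platform_ioremap_resource_byname(). */
static void __iomem *example_ioremap_byname(struct platform_device *pdev,
					    const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
Back to the probe flow: zynqmp_disp_create_layers sets up the video and graphics layers and requests their DMA channels.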
static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
{
static const struct zynqmp_disp_layer_info layer_info[] = {
[ZYNQMP_DPSUB_LAYER_VID] = {
.formats = avbuf_vid_fmts,
.num_formats = ARRAY_SIZE(avbuf_vid_fmts),
.num_channels = 3,
},
[ZYNQMP_DPSUB_LAYER_GFX] = {
.formats = avbuf_gfx_fmts,
.num_formats = ARRAY_SIZE(avbuf_gfx_fmts),
.num_channels = 1,
},
};
for (i = 0; i < ARRAY_SIZE(disp->layers); i++) {
struct zynqmp_disp_layer *layer = &disp->layers[i];
layer->id = i;
layer->disp = disp;
layer->info = &layer_info[i];
ret = zynqmp_disp_layer_request_dma(disp, layer);
if (ret)
goto err;
disp->dpsub->layers[i] = layer;
}
static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
static const char * const dma_names[] = { "vid", "gfx" };
unsigned int i;
int ret;
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
char dma_channel_name[16];
snprintf(dma_channel_name, sizeof(dma_channel_name),
"%s%u", dma_names[layer->id], i);
dma->chan = dma_request_chan(disp->dev, dma_channel_name);
if (IS_ERR(dma->chan)) {
ret = dev_err_probe(disp->dev, PTR_ERR(dma->chan),
"failed to request dma channel\n");
dma->chan = NULL;
return ret;
}
}
return 0;
}
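The channels requested here are later used to feed framebuffer data into the display pipeline through the dmaengine API. As a rough illustration of that flow (the real driver builds a per-plane struct dma_interleaved_template in its plane-update path; names and values below are placeholders):
#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/slab.h>
static int example_layer_update(struct dma_chan *chan, dma_addr_t paddr,
				unsigned int line_bytes, unsigned int height,
				unsigned int stride)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;
	xt->src_start = paddr;                 /* framebuffer in memory */
	xt->dir = DMA_MEM_TO_DEV;              /* memory -> display */
	xt->src_inc = true;
	xt->src_sgl = true;
	xt->numf = height;                     /* one frame = height lines */
	xt->frame_size = 1;
	xt->sgl[0].size = line_bytes;          /* payload bytes per line */
	xt->sgl[0].icg = stride - line_bytes;  /* gap between lines */
	/* DMA_PREP_REPEAT keeps the frame cycling until a new one is queued. */
	desc = dmaengine_prep_interleaved_dma(chan, xt,
					      DMA_CTRL_ACK | DMA_PREP_REPEAT);
	if (desc) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
	kfree(xt);
	return desc ? 0 : -ENOMEM;
}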
zynqmp_dpsub_drm_init Link to heading
This stage creates the kernel mode-setting (KMS) side of the DRM device.
drivers/gpu/drm/xlnx/zynqmp_kms.c
if (dpsub->dma_enabled) {
ret = zynqmp_dpsub_drm_init(dpsub);
if (ret)
goto err_disp;
} else {
drm_bridge_add(dpsub->bridge);
}
int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
{
	struct zynqmp_dpsub_drm *dpdrm;
	struct drm_device *drm;
	int ret;

	dpdrm = devm_drm_dev_alloc(dpsub->dev, &zynqmp_dpsub_drm_driver,
				   struct zynqmp_dpsub_drm, dev);
	if (IS_ERR(dpdrm))
		return PTR_ERR(dpdrm);

	drm = &dpdrm->dev;
/* Initialize mode config, vblank and the KMS poll helper. */
ret = drmm_mode_config_init(drm);
if (ret < 0)
return ret;
drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = ZYNQMP_DISP_MAX_WIDTH;
drm->mode_config.max_height = ZYNQMP_DISP_MAX_HEIGHT;
ret = drm_vblank_init(drm, 1);
if (ret)
return ret;
drm_kms_helper_poll_init(drm);
ret = zynqmp_dpsub_kms_init(dpsub);
/* Reset all components and register the DRM device. */
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
/* Initialize fbdev generic emulation. */
drm_fbdev_dma_setup(drm, 24);
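devm_drm_dev_alloc() above took a pointer to zynqmp_dpsub_drm_driver, the struct drm_driver that describes the device to the DRM core. Its exact contents live in zynqmp_kms.c; an approximate sketch of its shape, assuming the usual GEM DMA helpers for a non-GPU scan-out device, is:
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
/* Approximate sketch only; see zynqmp_kms.c for the real definition. */
DEFINE_DRM_GEM_DMA_FOPS(example_drm_fops);
static const struct drm_driver example_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	.fops  = &example_drm_fops,
	.name  = "zynqmp-dpsub",
	.desc  = "Xilinx DisplayPort Subsystem Driver",
	.major = 1,
	.minor = 0,
};
Once drm_dev_register() and drm_fbdev_dma_setup(drm, 24) succeed, the device is visible to user space as a DRM card node, plus a legacy 24bpp framebuffer console through the fbdev emulation layer.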