OK335xS davinci mdio driver hacking

时间:2023-12-27 10:45:01
/*******************************************************************************
* OK335xS davinci mdio driver hacking
* 说明:
* 以前一直也想对网卡驱动的工作原理进行跟踪,这次正好有机会,先跟mdio接口部分
* 的代码。
*
* 2016-3-1 深圳 南山平山村 曾剑锋
******************************************************************************/

/* Platform driver glue: binds devices named "davinci_mdio" to the
 * probe/remove callbacks and PM ops below. */
static struct platform_driver davinci_mdio_driver = {
	.driver = {
		.name	 = "davinci_mdio",
		.owner	 = THIS_MODULE,
		.pm	 = &davinci_mdio_pm_ops,
	},
	.probe = davinci_mdio_probe,
	.remove = __devexit_p(davinci_mdio_remove),
};

/* Driver entry point: register the platform driver with the core. */
static int __init davinci_mdio_init(void)
{
	return platform_driver_register(&davinci_mdio_driver);
}
/* device_initcall: run during boot when built in (also module init). */
device_initcall(davinci_mdio_init);

/* Driver exit point: unregister the platform driver on module unload. */
static void __exit davinci_mdio_exit(void)
{
	platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci MDIO driver");

/*
 * davinci_mdio_probe() - set up the MDIO controller for one device instance.
 *
 * Allocates per-device state, fills in an mii_bus backed by the davinci
 * read/write/reset callbacks, enables runtime PM and the functional
 * clock, maps the controller registers, registers the bus, and finally
 * logs every PHY found on it.
 *
 * Returns 0 on success or a negative errno; on failure every partially
 * acquired resource is released via the bail_out path.
 */
static int __devinit davinci_mdio_probe(struct platform_device *pdev)
{
	struct mdio_platform_data *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct davinci_mdio_data *data;
	struct resource *res;
	struct phy_device *phy;
	int ret, addr;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_err(dev, "failed to alloc device data\n");
		return -ENOMEM;
	}

	/* Fall back to driver defaults when the board supplied no pdata. */
	data->pdata = pdata ? (*pdata) : default_pdata;

	data->bus = mdiobus_alloc();
	if (!data->bus) {
		dev_err(dev, "failed to alloc mii bus\n");
		ret = -ENOMEM;
		goto bail_out;
	}

	data->bus->name		= dev_name(dev);
	data->bus->read		= davinci_mdio_read;
	data->bus->write	= davinci_mdio_write;
	data->bus->reset	= davinci_mdio_reset;
	data->bus->parent	= dev;
	data->bus->priv		= data;
	snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);
	data->clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "failed to get device clock\n");
		/* Capture the error BEFORE clearing the pointer: the
		 * original order (NULL first) made PTR_ERR() return 0,
		 * so probe "succeeded" with no clock. */
		ret = PTR_ERR(data->clk);
		data->clk = NULL;
		goto bail_out;
	}

	dev_set_drvdata(dev, data);
	data->dev = dev;
	spin_lock_init(&data->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "could not find register map resource\n");
		ret = -ENOENT;
		goto bail_out;
	}

	res = devm_request_mem_region(dev, res->start, resource_size(res),
				      dev_name(dev));
	if (!res) {
		dev_err(dev, "could not allocate register map resource\n");
		ret = -ENXIO;
		goto bail_out;
	}

	data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!data->regs) {
		dev_err(dev, "could not map mdio registers\n");
		ret = -ENOMEM;
		goto bail_out;
	}

	/* register the mii bus */
	ret = mdiobus_register(data->bus);
	if (ret)
		goto bail_out;

	/* scan and dump the bus */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		phy = data->bus->phy_map[addr];
		if (phy) {
			dev_info(dev, "phy[%d]: device %s, driver %s\n",
				 phy->addr, dev_name(&phy->dev),
				 phy->drv ? phy->drv->name : "unknown");
		}
	}

	return 0;

bail_out:
	if (data->bus)
		mdiobus_free(data->bus);
	if (data->clk)
		clk_put(data->clk);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	kfree(data);

	return ret;
}

int mdiobus_register(struct mii_bus *bus) <-------------------+
{
int i, err; if (NULL == bus || NULL == bus->name ||
NULL == bus->read ||
NULL == bus->write)
return -EINVAL; BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
bus->state != MDIOBUS_UNREGISTERED); bus->dev.parent = bus->parent;
bus->dev.class = &mdio_bus_class;
bus->dev.groups = NULL;
dev_set_name(&bus->dev, "%s", bus->id); err = device_register(&bus->dev);
if (err) {
printk(KERN_ERR "mii_bus %s failed to register\n", bus->id);
return -EINVAL;
} mutex_init(&bus->mdio_lock); if (bus->reset)
bus->reset(bus); for (i = ; i < PHY_MAX_ADDR; i++) {
if ((bus->phy_mask & ( << i)) == ) {
struct phy_device *phydev; phydev = mdiobus_scan(bus, i); -----------------------+
if (IS_ERR(phydev)) { |
err = PTR_ERR(phydev); |
goto error; |
} |
} |
} |
|
bus->state = MDIOBUS_REGISTERED; |
pr_info("%s: probed\n", bus->name); |
return ; |
|
error: |
while (--i >= ) { |
if (bus->phy_map[i]) |
device_unregister(&bus->phy_map[i]->dev); |
} |
device_del(&bus->dev); |
return err; |
} |
EXPORT_SYMBOL(mdiobus_register); |
|
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) <-------+
{
struct phy_device *phydev;
int err; phydev = get_phy_device(bus, addr); -----------+
if (IS_ERR(phydev) || phydev == NULL) |
return phydev; |
|
err = phy_device_register(phydev); ---------------+ |
if (err) { | |
phy_device_free(phydev); | |
return NULL; | |
} | |
| |
return phydev; | |
} | |
EXPORT_SYMBOL(mdiobus_scan); | |
| |
int phy_device_register(struct phy_device *phydev) <-------+ |
{ |
int err; |
|
/* Don't register a phy if one is already registered at this |
* address */ |
if (phydev->bus->phy_map[phydev->addr]) |
return -EINVAL; |
phydev->bus->phy_map[phydev->addr] = phydev; |
|
/* Run all of the fixups for this PHY */ |
phy_scan_fixups(phydev); ------------------+ |
| |
err = device_register(&phydev->dev); | |
if (err) { | |
pr_err("phy %d failed to register\n", phydev->addr); | |
goto out; | |
} | |
| |
return ; | |
| |
out: | |
phydev->bus->phy_map[phydev->addr] = NULL; | |
return err; | |
} | |
EXPORT_SYMBOL(phy_device_register); | |
| |
/* Runs any matching fixups for this phydev */ | |
int phy_scan_fixups(struct phy_device *phydev) <-----------+ |
{ |
struct phy_fixup *fixup; |
|
mutex_lock(&phy_fixup_lock); |
list_for_each_entry(fixup, &phy_fixup_list, list) { |
if (phy_needs_fixup(phydev, fixup)) { |
int err; |
|
err = fixup->run(phydev); |
|
if (err < ) { |
mutex_unlock(&phy_fixup_lock); |
return err; |
} |
} |
} |
mutex_unlock(&phy_fixup_lock); |
|
return ; |
} |
|
struct phy_device * get_phy_device(struct mii_bus *bus, int addr) <--------+
{
struct phy_device *dev = NULL;
u32 phy_id;
int r; r = get_phy_id(bus, addr, &phy_id); ------------------+
if (r) |
return ERR_PTR(r); |
|
/* If the phy_id is mostly Fs, there is no device there */ |
if ((phy_id & 0x1fffffff) == 0x1fffffff) |
return NULL; |
|
dev = phy_device_create(bus, addr, phy_id); --------*----+
| |
return dev; | |
} | |
EXPORT_SYMBOL(get_phy_device); | |
| |
/**
 * get_phy_id - build a 32-bit PHY ID from the two MII ID registers.
 * @bus:    the target MII bus
 * @addr:   PHY address on the bus
 * @phy_id: out: PHYSID1 in the upper 16 bits, PHYSID2 in the lower 16
 *
 * Returns 0 on success or -EIO when either register read fails.
 */
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
{
	int phy_reg;

	/* Grab the bits from PHYIR1, and put them
	 * in the upper half */
	phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);

	if (phy_reg < 0)
		return -EIO;

	*phy_id = (phy_reg & 0xffff) << 16;

	/* Grab the bits from PHYIR2, and put them in the lower half */
	phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);

	if (phy_reg < 0)
		return -EIO;

	*phy_id |= (phy_reg & 0xffff);

	return 0;
}
EXPORT_SYMBOL(get_phy_id);

static struct phy_device* phy_device_create(struct mii_bus *bus, <------+
int addr, int phy_id)
{
struct phy_device *dev; /* We allocate the device, and initialize the
* default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (NULL == dev)
return (struct phy_device*) PTR_ERR((void*)-ENOMEM); dev->dev.release = phy_device_release; dev->speed = ;
dev->duplex = -;
dev->pause = dev->asym_pause = ;
dev->link = ;
dev->interface = PHY_INTERFACE_MODE_GMII; dev->autoneg = AUTONEG_ENABLE; dev->addr = addr;
dev->phy_id = phy_id;
dev->bus = bus;
dev->dev.parent = bus->parent;
dev->dev.bus = &mdio_bus_type; // important
dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr); dev->state = PHY_DOWN; mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); ----------+
|
/* Request the appropriate module unconditionally; don't |
bother trying to do so only if it isn't already loaded, |
because that gets complicated. A hotplug event would have |
done an unconditional modprobe anyway. |
We don't do normal hotplug because it won't work for MDIO |
-- because it relies on the device staying around for long |
enough for the driver to get loaded. With MDIO, the NIC |
driver will get bored and give up as soon as it finds that |
there's no driver _already_ loaded. */ |
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); |
|
return dev; |
} |
|
void phy_state_machine(struct work_struct *work) <-------------+
{
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
int needs_aneg = ;
int err = ; mutex_lock(&phydev->lock); if (phydev->adjust_state)
phydev->adjust_state(phydev->attached_dev); switch(phydev->state) {
case PHY_DOWN:
case PHY_STARTING:
case PHY_READY:
case PHY_PENDING:
break;
case PHY_UP:
needs_aneg = ; phydev->link_timeout = PHY_AN_TIMEOUT; break;
case PHY_AN:
err = phy_read_status(phydev); if (err < )
break; /* If the link is down, give up on
* negotiation for now */
if (!phydev->link) {
phydev->state = PHY_NOLINK;
netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
break;
} /* Check if negotiation is done. Break
* if there's an error */
err = phy_aneg_done(phydev);
if (err < )
break; /* If AN is done, we're running */
if (err > ) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev); } else if ( == phydev->link_timeout--) {
int idx; needs_aneg = ;
/* If we have the magic_aneg bit,
* we try again */
if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break; /* The timer expired, and we still
* don't have a setting, so we try
* forcing it until we find one that
* works, starting from the fastest speed,
* and working our way down */
idx = phy_find_valid(, phydev->supported); phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex; phydev->autoneg = AUTONEG_DISABLE; pr_info("Trying %d/%s\n", phydev->speed,
DUPLEX_FULL ==
phydev->duplex ?
"FULL" : "HALF");
}
break;
case PHY_NOLINK:
err = phy_read_status(phydev); if (err)
break; if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
}
break;
case PHY_FORCING:
err = genphy_update_link(phydev); if (err)
break; if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
} else {
if ( == phydev->link_timeout--) {
phy_force_reduction(phydev);
needs_aneg = ;
}
} phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
/* Only register a CHANGE if we are
* polling */
if (PHY_POLL == phydev->irq)
phydev->state = PHY_CHANGELINK;
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev); if (err)
break; if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
} else {
phydev->state = PHY_NOLINK;
netif_carrier_off(phydev->attached_dev);
} phydev->adjust_link(phydev->attached_dev); if (PHY_POLL != phydev->irq)
err = phy_config_interrupt(phydev,
PHY_INTERRUPT_ENABLED);
break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = ;
netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
}
break;
case PHY_RESUMING: err = phy_clear_interrupt(phydev); if (err)
break; err = phy_config_interrupt(phydev,
PHY_INTERRUPT_ENABLED); if (err)
break; if (AUTONEG_ENABLE == phydev->autoneg) {
err = phy_aneg_done(phydev);
if (err < )
break; /* err > 0 if AN is done.
* Otherwise, it's 0, and we're
* still waiting for AN */
if (err > ) {
err = phy_read_status(phydev);
if (err)
break; if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
} else
phydev->state = PHY_NOLINK;
phydev->adjust_link(phydev->attached_dev);
} else {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
}
} else {
err = phy_read_status(phydev); ---------+
if (err) |
break; |
|
if (phydev->link) { |
phydev->state = PHY_RUNNING; |
netif_carrier_on(phydev->attached_dev); |
} else |
phydev->state = PHY_NOLINK; |
phydev->adjust_link(phydev->attached_dev); |
} |
break; |
} |
|
mutex_unlock(&phydev->lock); |
|
if (needs_aneg) |
err = phy_start_aneg(phydev); |
|
if (err < ) |
phy_error(phydev); |
|
schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); |
} |
|
static inline int phy_read_status(struct phy_device *phydev) { <--------+
return phydev->drv->read_status(phydev);
}