Commit c781c06d authored by Kristian Høgsberg, committed by Stefan Richter

firewire: Clean up comment style.

Drop filenames from file preamble, drop editor annotations and
use standard indent style for block comments.
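
For reference, the indent-style change applied throughout these files is roughly the following (an illustrative sketch, not a hunk taken from the diff below):

    /* Old style: the comment text starts on the opening line and the
     * closing marker shares the last text line. */

    /*
     * New style: the opening and closing markers sit on lines of their
     * own, and every text line is continued with a leading " * ".
     */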
Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de> (fixed typo)
parent e175569c
fw-card.c:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-card.c - card level functions
- *
- * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
+/*
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

@@ -69,12 +66,14 @@ generate_config_rom (struct fw_card *card, size_t *config_rom_length)
 static u32 config_rom[256];
 int i, j, length;
-/* Initialize contents of config rom buffer. On the OHCI
+/*
+ * Initialize contents of config rom buffer. On the OHCI
  * controller, block reads to the config rom accesses the host
  * memory, but quadlet read access the hardware bus info block
  * registers. That's just crack, but it means we should make
  * sure the contents of bus info block in host memory mathces
- * the version stored in the OHCI registers. */
+ * the version stored in the OHCI registers.
+ */
 memset(config_rom, 0, sizeof config_rom);
 config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0);

@@ -143,9 +142,11 @@ fw_core_add_descriptor (struct fw_descriptor *desc)
 {
 size_t i;
-/* Check descriptor is valid; the length of all blocks in the
+/*
+ * Check descriptor is valid; the length of all blocks in the
  * descriptor has to add up to exactly the length of the
- * block. */
+ * block.
+ */
 i = 0;
 while (i < desc->length)
 i += (desc->data[i] >> 16) + 1;

@@ -228,7 +229,8 @@ fw_card_bm_work(struct work_struct *work)
 if (card->bm_generation + 1 == generation ||
 (card->bm_generation != generation && grace)) {
-/* This first step is to figure out who is IRM and
+/*
+ * This first step is to figure out who is IRM and
  * then try to become bus manager. If the IRM is not
  * well defined (e.g. does not have an active link
  * layer or does not responds to our lock request, we

@@ -236,7 +238,8 @@ fw_card_bm_work(struct work_struct *work)
  * In that case, we do a goto into the gap count logic
  * so that when we do the reset, we still optimize the
  * gap count. That could well save a reset in the
- * next generation. */
+ * next generation.
+ */
 irm_id = card->irm_node->node_id;
 if (!card->irm_node->link_on) {

@@ -260,8 +263,10 @@ fw_card_bm_work(struct work_struct *work)
 wait_for_completion(&bmd.done);
 if (bmd.rcode == RCODE_GENERATION) {
-/* Another bus reset happened. Just return,
- * the BM work has been rescheduled. */
+/*
+ * Another bus reset happened. Just return,
+ * the BM work has been rescheduled.
+ */
 return;
 }

@@ -271,48 +276,62 @@ fw_card_bm_work(struct work_struct *work)
 spin_lock_irqsave(&card->lock, flags);
 if (bmd.rcode != RCODE_COMPLETE) {
-/* The lock request failed, maybe the IRM
+/*
+ * The lock request failed, maybe the IRM
  * isn't really IRM capable after all. Let's
  * do a bus reset and pick the local node as
- * root, and thus, IRM. */
+ * root, and thus, IRM.
+ */
 new_root_id = card->local_node->node_id;
 fw_notify("BM lock failed, making local node (%02x) root.\n",
 new_root_id);
 goto pick_me;
 }
 } else if (card->bm_generation != generation) {
-/* OK, we weren't BM in the last generation, and it's
+/*
+ * OK, we weren't BM in the last generation, and it's
  * less than 100ms since last bus reset. Reschedule
- * this task 100ms from now. */
+ * this task 100ms from now.
+ */
 spin_unlock_irqrestore(&card->lock, flags);
 schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
 return;
 }
-/* We're bus manager for this generation, so next step is to
+/*
+ * We're bus manager for this generation, so next step is to
  * make sure we have an active cycle master and do gap count
- * optimization. */
+ * optimization.
+ */
 card->bm_generation = generation;
 if (root == NULL) {
-/* Either link_on is false, or we failed to read the
- * config rom. In either case, pick another root. */
+/*
+ * Either link_on is false, or we failed to read the
+ * config rom. In either case, pick another root.
+ */
 new_root_id = card->local_node->node_id;
 } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
-/* If we haven't probed this device yet, bail out now
- * and let's try again once that's done. */
+/*
+ * If we haven't probed this device yet, bail out now
+ * and let's try again once that's done.
+ */
 spin_unlock_irqrestore(&card->lock, flags);
 return;
 } else if (root->config_rom[2] & bib_cmc) {
-/* FIXME: I suppose we should set the cmstr bit in the
+/*
+ * FIXME: I suppose we should set the cmstr bit in the
  * STATE_CLEAR register of this node, as described in
  * 1394-1995, 8.4.2.6. Also, send out a force root
- * packet for this node. */
+ * packet for this node.
+ */
 new_root_id = root_id;
 } else {
-/* Current root has an active link layer and we
+/*
+ * Current root has an active link layer and we
  * successfully read the config rom, but it's not
- * cycle master capable. */
+ * cycle master capable.
+ */
 new_root_id = card->local_node->node_id;
 }

@@ -324,9 +343,11 @@ fw_card_bm_work(struct work_struct *work)
 else
 gap_count = 63;
-/* Finally, figure out if we should do a reset or not. If we've
+/*
+ * Finally, figure out if we should do a reset or not. If we've
  * done less that 5 resets with the same physical topology and we
- * have either a new root or a new gap count setting, let's do it. */
+ * have either a new root or a new gap count setting, let's do it.
+ */
 if (card->bm_retries++ < 5 &&
 (card->gap_count != gap_count || new_root_id != root_id))

@@ -391,8 +412,10 @@ fw_card_add(struct fw_card *card,
 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
 return -EIO;
-/* The subsystem grabs a reference when the card is added and
- * drops it when the driver calls fw_core_remove_card. */
+/*
+ * The subsystem grabs a reference when the card is added and
+ * drops it when the driver calls fw_core_remove_card.
+ */
 fw_card_get(card);
 down_write(&card_rwsem);

@@ -405,11 +428,13 @@ fw_card_add(struct fw_card *card,
 EXPORT_SYMBOL(fw_card_add);
-/* The next few functions implements a dummy driver that use once a
+/*
+ * The next few functions implements a dummy driver that use once a
  * card driver shuts down an fw_card. This allows the driver to
  * cleanly unload, as all IO to the card will be handled by the dummy
  * driver instead of calling into the (possibly) unloaded module. The
- * dummy driver just fails all IO. */
+ * dummy driver just fails all IO.
+ */
 static int
 dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)

@@ -429,8 +454,10 @@ static int
 dummy_set_config_rom(struct fw_card *card,
 u32 *config_rom, size_t length)
 {
-/* We take the card out of card_list before setting the dummy
- * driver, so this should never get called. */
+/*
+ * We take the card out of card_list before setting the dummy
+ * driver, so this should never get called.
+ */
 BUG();
 return -1;
 }

@@ -510,9 +537,11 @@ release_card(struct kref *kref)
 kfree(card);
 }
-/* An assumption for fw_card_put() is that the card driver allocates
+/*
+ * An assumption for fw_card_put() is that the card driver allocates
  * the fw_card struct with kalloc and that it has been shut down
- * before the last ref is dropped. */
+ * before the last ref is dropped.
+ */
 void
 fw_card_put(struct fw_card *card)
 {

@@ -524,8 +553,6 @@ int
 fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
 {
 int reg = short_reset ? 5 : 1;
-/* The following values happen to be the same bit. However be
- * explicit for clarity. */
 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
 return card->driver->update_phy_reg(card, reg, 0, bit);
...
fw-device-cdev.c:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-device-cdev.c - Char device for device raw access
+/*
+ * Char device for device raw access
  *
- * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

@@ -36,9 +35,6 @@
 #include "fw-topology.h"
 #include "fw-device.h"
-/* dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct. */
 struct client;
 struct client_resource {
 struct list_head link;

@@ -46,6 +42,11 @@ struct client_resource {
 u32 handle;
 };
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in the struct.
+ */
 struct event {
 struct { void *data; size_t size; } v[2];
 struct list_head link;

@@ -691,13 +692,15 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
 if (ctx == NULL || request->handle != 0)
 return -EINVAL;
-/* If the user passes a non-NULL data pointer, has mmap()'ed
+/*
+ * If the user passes a non-NULL data pointer, has mmap()'ed
  * the iso buffer, and the pointer points inside the buffer,
  * we setup the payload pointers accordingly. Otherwise we
  * set them both to 0, which will still let packets with
  * payload_length == 0 through. In other words, if no packets
  * use the indirect payload, the iso buffer need not be mapped
- * and the request->data pointer is ignored.*/
+ * and the request->data pointer is ignored.
+ */
 payload = (unsigned long)request->data - client->vm_start;
 buffer_end = client->buffer.page_count << PAGE_SHIFT;

@@ -720,8 +723,10 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
 header_length = u.packet.header_length;
 } else {
-/* We require that header_length is a multiple of
- * the fixed header size, ctx->header_size */
+/*
+ * We require that header_length is a multiple of
+ * the fixed header size, ctx->header_size.
+ */
 if (ctx->header_size == 0) {
 if (u.packet.header_length > 0)
 return -EINVAL;

@@ -908,8 +913,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
 list_for_each_entry_safe(r, next_r, &client->resource_list, link)
 r->release(client, r);
-/* FIXME: We should wait for the async tasklets to stop
- * running before freeing the memory. */
+/*
+ * FIXME: We should wait for the async tasklets to stop
+ * running before freeing the memory.
+ */
 list_for_each_entry_safe(e, next_e, &client->event_list, link)
 kfree(e);
...
fw-device.c:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-device.c - Device probing and sysfs code.
+/*
+ * Device probing and sysfs code.
  *
  * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *

@@ -174,8 +173,10 @@ static void fw_device_release(struct device *dev)
 struct fw_device *device = fw_device(dev);
 unsigned long flags;
-/* Take the card lock so we don't set this to NULL while a
- * FW_NODE_UPDATED callback is being handled. */
+/*
+ * Take the card lock so we don't set this to NULL while a
+ * FW_NODE_UPDATED callback is being handled.
+ */
 spin_lock_irqsave(&device->card->lock, flags);
 device->node->data = NULL;
 spin_unlock_irqrestore(&device->card->lock, flags);

@@ -421,34 +422,42 @@ static int read_bus_info_block(struct fw_device *device)
 for (i = 0; i < 5; i++) {
 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
 return -1;
-/* As per IEEE1212 7.2, during power-up, devices can
+/*
+ * As per IEEE1212 7.2, during power-up, devices can
  * reply with a 0 for the first quadlet of the config
  * rom to indicate that they are booting (for example,
  * if the firmware is on the disk of a external
  * harddisk). In that case we just fail, and the
- * retry mechanism will try again later. */
+ * retry mechanism will try again later.
+ */
 if (i == 0 && rom[i] == 0)
 return -1;
 }
-/* Now parse the config rom. The config rom is a recursive
+/*
+ * Now parse the config rom. The config rom is a recursive
  * directory structure so we parse it using a stack of
  * references to the blocks that make up the structure. We
  * push a reference to the root directory on the stack to
- * start things off. */
+ * start things off.
+ */
 length = i;
 sp = 0;
 stack[sp++] = 0xc0000005;
 while (sp > 0) {
-/* Pop the next block reference of the stack. The
+/*
+ * Pop the next block reference of the stack. The
  * lower 24 bits is the offset into the config rom,
  * the upper 8 bits are the type of the reference the
- * block. */
+ * block.
+ */
 key = stack[--sp];
 i = key & 0xffffff;
 if (i >= ARRAY_SIZE(rom))
-/* The reference points outside the standard
- * config rom area, something's fishy. */
+/*
+ * The reference points outside the standard
+ * config rom area, something's fishy.
+ */
 return -1;
 /* Read header quadlet for the block to get the length. */

@@ -457,15 +466,19 @@ static int read_bus_info_block(struct fw_device *device)
 end = i + (rom[i] >> 16) + 1;
 i++;
 if (end > ARRAY_SIZE(rom))
-/* This block extends outside standard config
+/*
+ * This block extends outside standard config
  * area (and the array we're reading it
  * into). That's broken, so ignore this
- * device. */
+ * device.
+ */
 return -1;
-/* Now read in the block. If this is a directory
+/*
+ * Now read in the block. If this is a directory
  * block, check the entries as we read them to see if
- * it references another block, and push it in that case. */
+ * it references another block, and push it in that case.
+ */
 while (i < end) {
 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
 return -1;

@@ -516,8 +529,10 @@ static void create_units(struct fw_device *device)
 if (key != (CSR_UNIT | CSR_DIRECTORY))
 continue;
-/* Get the address of the unit directory and try to
- * match the drivers id_tables against it. */
+/*
+ * Get the address of the unit directory and try to
+ * match the drivers id_tables against it.
+ */
 unit = kzalloc(sizeof *unit, GFP_KERNEL);
 if (unit == NULL) {
 fw_error("failed to allocate memory for unit\n");

@@ -585,14 +600,16 @@ static struct device_type fw_device_type = {
 .release = fw_device_release,
 };
-/* These defines control the retry behavior for reading the config
+/*
+ * These defines control the retry behavior for reading the config
  * rom. It shouldn't be necessary to tweak these; if the device
  * doesn't respond to a config rom read within 10 seconds, it's not
  * going to respond at all. As for the initial delay, a lot of
  * devices will be able to respond within half a second after bus
  * reset. On the other hand, it's not really worth being more
  * aggressive than that, since it scales pretty well; if 10 devices
- * are plugged in, they're all getting read within one second. */
+ * are plugged in, they're all getting read within one second.
+ */
 #define MAX_RETRIES 10
 #define RETRY_DELAY (3 * HZ)

@@ -604,9 +621,11 @@ static void fw_device_init(struct work_struct *work)
 container_of(work, struct fw_device, work.work);
 int minor, err;
-/* All failure paths here set node->data to NULL, so that we
+/*
+ * All failure paths here set node->data to NULL, so that we
  * don't try to do device_for_each_child() on a kfree()'d
- * device. */
+ * device.
+ */
 if (read_bus_info_block(device) < 0) {
 if (device->config_rom_retries < MAX_RETRIES) {

@@ -647,13 +666,15 @@ static void fw_device_init(struct work_struct *work)
 create_units(device);
-/* Transition the device to running state. If it got pulled
+/*
+ * Transition the device to running state. If it got pulled
  * out from under us while we did the intialization work, we
  * have to shut down the device again here. Normally, though,
  * fw_node_event will be responsible for shutting it down when
  * necessary. We have to use the atomic cmpxchg here to avoid
  * racing with the FW_NODE_DESTROYED case in
- * fw_node_event(). */
+ * fw_node_event().
+ */
 if (atomic_cmpxchg(&device->state,
 FW_DEVICE_INITIALIZING,
 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)

@@ -662,10 +683,12 @@ static void fw_device_init(struct work_struct *work)
 fw_notify("created new fw device %s (%d config rom retries)\n",
 device->device.bus_id, device->config_rom_retries);
-/* Reschedule the IRM work if we just finished reading the
+/*
+ * Reschedule the IRM work if we just finished reading the
  * root node config rom. If this races with a bus reset we
  * just end up running the IRM work a couple of extra times -
- * pretty harmless. */
+ * pretty harmless.
+ */
 if (device->node == device->card->root_node)
 schedule_delayed_work(&device->card->work, 0);

@@ -716,12 +739,14 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 if (device == NULL)
 break;
-/* Do minimal intialization of the device here, the
+/*
+ * Do minimal intialization of the device here, the
  * rest will happen in fw_device_init(). We need the
  * card and node so we can read the config rom and we
  * need to do device_initialize() now so
  * device_for_each_child() in FW_NODE_UPDATED is
- * doesn't freak out. */
+ * doesn't freak out.
+ */
 device_initialize(&device->device);
 atomic_set(&device->state, FW_DEVICE_INITIALIZING);
 device->card = fw_card_get(card);

@@ -730,15 +755,19 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 device->generation = card->generation;
 INIT_LIST_HEAD(&device->client_list);
-/* Set the node data to point back to this device so
+/*
+ * Set the node data to point back to this device so
  * FW_NODE_UPDATED callbacks can update the node_id
- * and generation for the device. */
+ * and generation for the device.
+ */
 node->data = device;
-/* Many devices are slow to respond after bus resets,
+/*
+ * Many devices are slow to respond after bus resets,
  * especially if they are bus powered and go through
  * power-up after getting plugged in. We schedule the
- * first config rom scan half a second after bus reset. */
+ * first config rom scan half a second after bus reset.
+ */
 INIT_DELAYED_WORK(&device->work, fw_device_init);
 schedule_delayed_work(&device->work, INITIAL_DELAY);
 break;

@@ -761,7 +790,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 if (!node->data)
 break;
-/* Destroy the device associated with the node. There
+/*
+ * Destroy the device associated with the node. There
  * are two cases here: either the device is fully
  * initialized (FW_DEVICE_RUNNING) or we're in the
  * process of reading its config rom

@@ -770,7 +800,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
  * full fw_device_shutdown(). If not, there's work
  * scheduled to read it's config rom, and we just put
  * the device in shutdown state to have that code fail
- * to create the device. */
+ * to create the device.
+ */
 device = node->data;
 if (atomic_xchg(&device->state,
 FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
...
fw-device.h:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-device.h - Device probing and sysfs code.
- *
+/*
  * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
...
fw-iso.c:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-iso.c - Isochronous IO
+/*
+ * Isochronous IO functionality
+ *
  * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
...
(the diff for one additional file is collapsed in this view)
fw-sbp2.c:

-/* -*- c-basic-offset: 8 -*-
- * fw-spb2.c -- SBP2 driver (SCSI over IEEE1394)
+/*
+ * SBP2 driver (SCSI over IEEE1394)
  *
  * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  *

@@ -18,7 +18,8 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
-/* The basic structure of this driver is based the old storage driver,
+/*
+ * The basic structure of this driver is based on the old storage driver,
  * drivers/ieee1394/sbp2.c, originally written by
  * James Goodwin <jamesg@filanet.com>
  * with later contributions and ongoing maintenance from

@@ -60,11 +61,13 @@ struct sbp2_device {
 u32 workarounds;
 int login_id;
-/* We cache these addresses and only update them once we've
+/*
+ * We cache these addresses and only update them once we've
  * logged in or reconnected to the sbp2 device. That way, any
  * IO to the device will automatically fail and get retried if
  * it happens in a window where the device is not ready to
- * handle it (e.g. after a bus reset but before we reconnect). */
+ * handle it (e.g. after a bus reset but before we reconnect).
+ */
 int node_id;
 int address_high;
 int generation;

@@ -239,10 +242,14 @@ static const struct {
 .model = ~0,
 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
 },
-/* There are iPods (2nd gen, 3rd gen) with model_id == 0, but
+/*
+ * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
  * these iPods do not feature the read_capacity bug according
  * to one report. Read_capacity behaviour as well as model_id
- * could change due to Apple-supplied firmware updates though. */
+ * could change due to Apple-supplied firmware updates though.
+ */
 /* iPod 4th generation. */ {
 .firmware_revision = 0x0a2700,
 .model = 0x000021,

@@ -398,9 +405,10 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 if (orb == NULL)
 return -ENOMEM;
-/* The sbp2 device is going to send a block read request to
- * read out the request from host memory, so map it for
- * dma. */
+/*
+ * The sbp2 device is going to send a block read request to
+ * read out the request from host memory, so map it for dma.
+ */
 orb->base.request_bus =
 dma_map_single(device->card->device, &orb->request,
 sizeof orb->request, DMA_TO_DEVICE);

@@ -426,10 +434,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 orb->request.status_fifo.high = sd->address_handler.offset >> 32;
 orb->request.status_fifo.low = sd->address_handler.offset;
-/* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
+/*
+ * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
  * login and 1 second reconnect time. The reconnect setting
- * is probably fine, but the exclusive login should be an
- * option. */
+ * is probably fine, but the exclusive login should be an option.
+ */
 if (function == SBP2_LOGIN_REQUEST) {
 orb->request.misc |=
 management_orb_exclusive |

@@ -592,8 +601,10 @@ static void sbp2_login(struct work_struct *work)
 sbp2_send_management_orb(unit, sd->node_id, sd->generation,
 SBP2_LOGOUT_REQUEST, sd->login_id,
 NULL);
-/* Set this back to sbp2_login so we fall back and
- * retry login on bus reset. */
+/*
+ * Set this back to sbp2_login so we fall back and
+ * retry login on bus reset.
+ */
 PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
 }
 kref_put(&sd->kref, release_sbp2_device);

@@ -633,9 +644,11 @@ static int sbp2_probe(struct device *dev)
 return -EBUSY;
 }
-/* Scan unit directory to get management agent address,
+/*
+ * Scan unit directory to get management agent address,
  * firmware revison and model. Initialize firmware_revision
- * and model to values that wont match anything in our table. */
+ * and model to values that wont match anything in our table.
+ */
 firmware_revision = 0xff000000;
 model = 0xff000000;
 fw_csr_iterator_init(&ci, unit->directory);

@@ -673,9 +686,11 @@ static int sbp2_probe(struct device *dev)
 get_device(&unit->device);
-/* We schedule work to do the login so we can easily
+/*
+ * We schedule work to do the login so we can easily
  * reschedule retries. Always get the ref before scheduling
- * work.*/
+ * work.
+ */
 INIT_DELAYED_WORK(&sd->work, sbp2_login);
 if (schedule_delayed_work(&sd->work, 0))
 kref_get(&sd->kref);

@@ -834,9 +849,11 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 result = sbp2_status_to_sense_data(status_get_data(*status),
 orb->cmd->sense_buffer);
 } else {
-/* If the orb completes with status == NULL, something
+/*
+ * If the orb completes with status == NULL, something
  * went wrong, typically a bus reset happened mid-orb
- * or when sending the write (less likely). */
+ * or when sending the write (less likely).
+ */
 result = DID_BUS_BUSY << 16;
 }

@@ -878,11 +895,13 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
 orb->cmd->sc_data_direction);
-/* Handle the special case where there is only one element in
+/*
+ * Handle the special case where there is only one element in
  * the scatter list by converting it to an immediate block
  * request. This is also a workaround for broken devices such
  * as the second generation iPod which doesn't support page
- * tables. */
+ * tables.
+ */
 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
 orb->request.data_descriptor.high = sd->address_high;
 orb->request.data_descriptor.low = sg_dma_address(sg);

@@ -891,8 +910,10 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 return;
 }
-/* Convert the scatterlist to an sbp2 page table. If any
- * scatterlist entries are too big for sbp2 we split the as we go. */
+/*
+ * Convert the scatterlist to an sbp2 page table. If any
+ * scatterlist entries are too big for sbp2 we split the as we go.
+ */
 for (i = 0, j = 0; i < count; i++) {
 sg_len = sg_dma_len(sg + i);
 sg_addr = sg_dma_address(sg + i);

@@ -908,11 +929,13 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 size = sizeof orb->page_table[0] * j;
-/* The data_descriptor pointer is the one case where we need
+/*
+ * The data_descriptor pointer is the one case where we need
  * to fill in the node ID part of the address. All other
  * pointers assume that the data referenced reside on the
  * initiator (i.e. us), but data_descriptor can refer to data
- * on other nodes so we need to put our ID in descriptor.high. */
+ * on other nodes so we need to put our ID in descriptor.high.
+ */
 orb->page_table_bus =
 dma_map_single(device->card->device, orb->page_table,

@@ -933,8 +956,10 @@ static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb)
 struct fw_device *device = fw_device(unit->device.parent);
 struct sbp2_device *sd = unit->device.driver_data;
-/* As for map_scatterlist, we need to fill in the high bits of
- * the data_descriptor pointer. */
+/*
+ * As for map_scatterlist, we need to fill in the high bits of
+ * the data_descriptor pointer.
+ */
 orb->request_buffer_bus =
 dma_map_single(device->card->device,

@@ -956,8 +981,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 struct sbp2_device *sd = unit->device.driver_data;
 struct sbp2_command_orb *orb;
-/* Bidirectional commands are not yet implemented, and unknown
- * transfer direction not handled. */
+/*
+ * Bidirectional commands are not yet implemented, and unknown
+ * transfer direction not handled.
+ */
 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
 goto fail_alloc;

@@ -983,10 +1010,12 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 orb->request.next.high = SBP2_ORB_NULL;
 orb->request.next.low = 0x0;
-/* At speed 100 we can do 512 bytes per packet, at speed 200,
+/*
+ * At speed 100 we can do 512 bytes per packet, at speed 200,
  * 1024 bytes per packet etc. The SBP-2 max_payload field
  * specifies the max payload size as 2 ^ (max_payload + 2), so
- * if we set this to max_speed + 7, we get the right value. */
+ * if we set this to max_speed + 7, we get the right value.
+ */
 orb->request.misc =
 command_orb_max_payload(device->node->max_speed + 7) |
 command_orb_speed(device->node->max_speed) |

@@ -1002,9 +1031,11 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 if (cmd->use_sg) {
 sbp2_command_orb_map_scatterlist(orb);
 } else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) {
-/* FIXME: Need to split this into a sg list... but
+/*
+ * FIXME: Need to split this into a sg list... but
  * could we get the scsi or blk layer to do that by
- * reporting our max supported block size? */
+ * reporting our max supported block size?
+ */
 fw_error("command > 64k\n");
 goto fail_bufflen;
 } else if (cmd->request_bufflen > 0) {
...
fw-topology.c:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-topology.c - Incremental bus scan, based on bus topology
+/*
+ * Incremental bus scan, based on bus topology
  *
  * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *

@@ -69,10 +68,12 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 sid++;
 q = *sid;
-/* Check that the extra packets actually are
+/*
+ * Check that the extra packets actually are
  * extended self ID packets and that the
  * sequence numbers in the extended self ID
- * packets increase as expected. */
+ * packets increase as expected.
+ */
 if (!self_id_extended(q) ||
 seq != self_id_ext_sequence(q))

@@ -113,7 +114,8 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
 return node;
 }
-/* Compute the maximum hop count for this node and it's children. The
+/*
+ * Compute the maximum hop count for this node and it's children. The
  * maximum hop count is the maximum number of connections between any
  * two nodes in the subtree rooted at this node. We need this for
  * setting the gap count. As we build the tree bottom up in

@@ -202,8 +204,10 @@ static struct fw_node *build_tree(struct fw_card *card,
 return NULL;
 }
-/* Seek back from the top of our stack to find the
- * start of the child nodes for this node. */
+/*
+ * Seek back from the top of our stack to find the
+ * start of the child nodes for this node.
+ */
 for (i = 0, h = &stack; i < child_port_count; i++)
 h = h->prev;
 child = fw_node(h);

@@ -230,7 +234,8 @@ static struct fw_node *build_tree(struct fw_card *card,
 for (i = 0; i < port_count; i++) {
 switch (get_port_type(sid, i)) {
 case SELFID_PORT_PARENT:
-/* Who's your daddy? We dont know the
+/*
+ * Who's your daddy? We dont know the
  * parent node at this time, so we
  * temporarily abuse node->color for
  * remembering the entry in the

@@ -245,8 +250,10 @@ static struct fw_node *build_tree(struct fw_card *card,
 case SELFID_PORT_CHILD:
 node->ports[i].node = child;
-/* Fix up parent reference for this
- * child node. */
+/*
+ * Fix up parent reference for this
+ * child node.
+ */
 child->ports[child->color].node = node;
 child->color = card->color;
 child = fw_node(child->link.next);

@@ -254,9 +261,11 @@ static struct fw_node *build_tree(struct fw_card *card,
 }
 }
-/* Check that the node reports exactly one parent
+/*
+ * Check that the node reports exactly one parent
  * port, except for the root, which of course should
- * have no parents. */
+ * have no parents.
+ */
 if ((next_sid == end && parent_count != 0) ||
 (next_sid < end && parent_count != 1)) {
 fw_error("Parent port inconsistency for node %d: "

@@ -269,9 +278,11 @@ static struct fw_node *build_tree(struct fw_card *card,
 list_add_tail(&node->link, &stack);
 stack_depth += 1 - child_port_count;
-/* If all PHYs does not report the same gap count
+/*
+ * If all PHYs does not report the same gap count
  * setting, we fall back to 63 which will force a gap
- * count reconfiguration and a reset. */
+ * count reconfiguration and a reset.
+ */
 if (self_id_gap_count(q) != gap_count)
 gap_count = 63;

@@ -427,9 +438,11 @@ update_tree(struct fw_card *card, struct fw_node *root)
 for (i = 0; i < node0->port_count; i++) {
 if (node0->ports[i].node && node1->ports[i].node) {
-/* This port didn't change, queue the
+/*
+ * This port didn't change, queue the
  * connected node for further
- * investigation. */
+ * investigation.
+ */
 if (node0->ports[i].node->color == card->color)
 continue;
 list_add_tail(&node0->ports[i].node->link,

@@ -437,19 +450,23 @@ update_tree(struct fw_card *card, struct fw_node *root)
 list_add_tail(&node1->ports[i].node->link,
 &list1);
 } else if (node0->ports[i].node) {
-/* The nodes connected here were
+/*
+ * The nodes connected here were
  * unplugged; unref the lost nodes and
  * queue FW_NODE_LOST callbacks for
- * them. */
+ * them.
+ */
 for_each_fw_node(card, node0->ports[i].node,
 report_lost_node);
 node0->ports[i].node = NULL;
 } else if (node1->ports[i].node) {
-/* One or more node were connected to
+/*
+ * One or more node were connected to
  * this port. Move the new nodes into
  * the tree and queue FW_NODE_CREATED
- * callbacks for them. */
+ * callbacks for them.
+ */
 move_tree(node0, node1, i);
 for_each_fw_node(card, node0->ports[i].node,
 report_found_node);

@@ -486,9 +503,11 @@ fw_core_handle_bus_reset(struct fw_card *card,
 spin_lock_irqsave(&card->lock, flags);
-/* If the new topology has a different self_id_count the topology
+/*
+ * If the new topology has a different self_id_count the topology
  * changed, either nodes were added or removed. In that case we
- * reset the IRM reset counter. */
+ * reset the IRM reset counter.
+ */
 if (card->self_id_count != self_id_count)
 card->bm_retries = 0;
...
fw-topology.h:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-topology.h -- Incremental bus scan, based on bus topology
- *
+/*
  * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
...
/* -*- c-basic-offset: 8 -*- /*
* * Core IEEE1394 transaction logic
* fw-transaction.c - core IEEE1394 transaction logic
* *
* Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
* *
...@@ -85,21 +84,27 @@ close_transaction(struct fw_transaction *transaction, ...@@ -85,21 +84,27 @@ close_transaction(struct fw_transaction *transaction,
return -ENOENT; return -ENOENT;
} }
/* Only valid for transactions that are potentially pending (ie have /*
* been sent). */ * Only valid for transactions that are potentially pending (ie have
* been sent).
*/
int int
fw_cancel_transaction(struct fw_card *card, fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction) struct fw_transaction *transaction)
{ {
/* Cancel the packet transmission if it's still queued. That /*
* Cancel the packet transmission if it's still queued. That
* will call the packet transmission callback which cancels * will call the packet transmission callback which cancels
* the transaction. */ * the transaction.
*/
if (card->driver->cancel_packet(card, &transaction->packet) == 0) if (card->driver->cancel_packet(card, &transaction->packet) == 0)
return 0; return 0;
/* If the request packet has already been sent, we need to see /*
* if the transaction is still pending and remove it in that case. */ * If the request packet has already been sent, we need to see
* if the transaction is still pending and remove it in that case.
*/
return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
} }
...@@ -131,8 +136,10 @@ transmit_complete_callback(struct fw_packet *packet, ...@@ -131,8 +136,10 @@ transmit_complete_callback(struct fw_packet *packet,
close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
break; break;
default: default:
/* In this case the ack is really a juju specific /*
* rcode, so just forward that to the callback. */ * In this case the ack is really a juju specific
* rcode, so just forward that to the callback.
*/
close_transaction(t, card, status, NULL, 0); close_transaction(t, card, status, NULL, 0);
break; break;
} }
...@@ -243,13 +250,17 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t, ...@@ -243,13 +250,17 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
unsigned long flags; unsigned long flags;
int tlabel, source; int tlabel, source;
/* Bump the flush timer up 100ms first of all so we /*
* don't race with a flush timer callback. */ * Bump the flush timer up 100ms first of all so we
* don't race with a flush timer callback.
*/
mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
/* Allocate tlabel from the bitmap and put the transaction on /*
* the list while holding the card spinlock. */ * Allocate tlabel from the bitmap and put the transaction on
* the list while holding the card spinlock.
*/
spin_lock_irqsave(&card->lock, flags); spin_lock_irqsave(&card->lock, flags);
...@@ -336,9 +347,11 @@ void fw_flush_transactions(struct fw_card *card) ...@@ -336,9 +347,11 @@ void fw_flush_transactions(struct fw_card *card)
list_for_each_entry_safe(t, next, &list, link) { list_for_each_entry_safe(t, next, &list, link) {
card->driver->cancel_packet(card, &t->packet); card->driver->cancel_packet(card, &t->packet);
/* At this point cancel_packet will never call the /*
* At this point cancel_packet will never call the
* transaction callback, since we just took all the * transaction callback, since we just took all the
* transactions out of the list. So do it here.*/ * transactions out of the list. So do it here.
*/
t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
} }
} }
...@@ -587,9 +600,11 @@ allocate_request(struct fw_packet *p) ...@@ -587,9 +600,11 @@ allocate_request(struct fw_packet *p)
void void
fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
{ {
/* Broadcast packets are reported as ACK_COMPLETE, so this /*
* Broadcast packets are reported as ACK_COMPLETE, so this
* check is sufficient to ensure we don't send response to * check is sufficient to ensure we don't send response to
* broadcast packets or posted writes. */ * broadcast packets or posted writes.
*/
if (request->ack != ACK_PENDING) if (request->ack != ACK_PENDING)
return; return;
...@@ -639,11 +654,13 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p) ...@@ -639,11 +654,13 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
offset, request->length); offset, request->length);
spin_unlock_irqrestore(&address_handler_lock, flags); spin_unlock_irqrestore(&address_handler_lock, flags);
/* FIXME: lookup the fw_node corresponding to the sender of /*
* FIXME: lookup the fw_node corresponding to the sender of
* this request and pass that to the address handler instead * this request and pass that to the address handler instead
* of the node ID. We may also want to move the address * of the node ID. We may also want to move the address
* allocations to fw_node so we only do this callback if the * allocations to fw_node so we only do this callback if the
* upper layers registered it for this node. */ * upper layers registered it for this node.
*/
if (handler == NULL) if (handler == NULL)
fw_send_response(card, request, RCODE_ADDRESS_ERROR); fw_send_response(card, request, RCODE_ADDRESS_ERROR);
@@ -687,8 +704,10 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 		return;
 	}

-	/* FIXME: sanity check packet, is length correct, does tcodes
-	 * and addresses match. */
+	/*
+	 * FIXME: sanity check packet, is length correct, does tcodes
+	 * and addresses match.
+	 */
 	switch (tcode) {
 	case TCODE_READ_QUADLET_RESPONSE:
@@ -790,11 +809,13 @@ handle_registers(struct fw_card *card, struct fw_request *request,
 	case CSR_BANDWIDTH_AVAILABLE:
 	case CSR_CHANNELS_AVAILABLE_HI:
 	case CSR_CHANNELS_AVAILABLE_LO:
-		/* FIXME: these are handled by the OHCI hardware and
-		 * the stack never sees these request. If we add
-		 * support for a new type of controller that doesn't
-		 * handle this in hardware we need to deal with these
-		 * transactions. */
+		/*
+		 * FIXME: these are handled by the OHCI hardware and
+		 * the stack never sees these request. If we add
+		 * support for a new type of controller that doesn't
+		 * handle this in hardware we need to deal with these
+		 * transactions.
+		 */
 		BUG();
 		break;
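These registers hold the isochronous resource manager's allocation state; nodes claim bandwidth and channels with lock (compare-and-swap) transactions against them, which the OHCI controller answers on its own. If a future controller left them to software, the core would have to implement that compare-and-swap itself. Roughly, and purely as an illustration of the lock semantics rather than a proposed implementation:

	/* Compare-and-swap semantics of an IRM allocation register: the lock
	 * request carries an expected old value and a new value; the register
	 * is updated only if it still holds the expected value, and the old
	 * value is returned either way. */
	static u32 demo_csr_compare_swap(u32 *reg, u32 expected, u32 new_value)
	{
		u32 old = *reg;

		if (old == expected)
			*reg = new_value;

		return old;
	}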
fw-transaction.h:

-/* -*- c-basic-offset: 8 -*-
- *
- * fw-transaction.h - Header for IEEE1394 transaction logic
- *
+/*
  * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -209,7 +206,8 @@ struct fw_packet {
 	size_t payload_length;
 	u32 timestamp;

-	/* This callback is called when the packet transmission has
+	/*
+	 * This callback is called when the packet transmission has
 	 * completed; for successful transmission, the status code is
 	 * the ack received from the destination, otherwise it's a
 	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
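The status value therefore lives in two ranges: negative errno values for local failures, and IEEE 1394 ack codes for packets that actually went out on the wire. A small helper illustrating the distinction; the function name is hypothetical, while ACK_COMPLETE and ACK_PENDING are the ack codes used elsewhere in this patch.

	#include <linux/types.h>

	/* True if the transmitted packet was accepted by the destination,
	 * either outright or as a split transaction awaiting a response. */
	static bool demo_packet_sent_ok(int status)
	{
		if (status < 0)		/* ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO */
			return false;

		return status == ACK_COMPLETE || status == ACK_PENDING;
	}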
@@ -230,8 +228,10 @@ struct fw_transaction {
 	struct fw_packet packet;

-	/* The data passed to the callback is valid only during the
-	 * callback. */
+	/*
+	 * The data passed to the callback is valid only during the
+	 * callback.
+	 */
 	fw_transaction_callback_t callback;
 	void *callback_data;
 };
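Because the payload handed to the callback is only valid for the duration of the call, a consumer that needs the data afterwards has to copy it out before returning. A minimal sketch under that assumption; the result holder, its buffer size, and the use of RCODE_COMPLETE as the success code are illustrative.

	#include <linux/kernel.h>
	#include <linux/string.h>

	/* Copy the response payload into caller-owned storage; once this
	 * callback returns, the 'payload' buffer may be reused by the core. */
	static void demo_read_done(struct fw_card *card, int rcode,
				   void *payload, size_t length, void *data)
	{
		struct demo_result {		/* hypothetical result holder */
			int rcode;
			size_t length;
			u8 buffer[64];
		} *result = data;

		result->rcode = rcode;
		result->length = min(length, sizeof(result->buffer));
		if (rcode == RCODE_COMPLETE && payload != NULL)
			memcpy(result->buffer, payload, result->length);
	}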
@@ -291,8 +291,10 @@ struct fw_card {
 	int link_speed;
 	int config_rom_generation;

-	/* We need to store up to 4 self ID for a maximum of 63
-	 * devices plus 3 words for the topology map header. */
+	/*
+	 * We need to store up to 4 self ID for a maximum of 63
+	 * devices plus 3 words for the topology map header.
+	 */
 	int self_id_count;
 	u32 topology_map[252 + 3];
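The array size follows directly from the comment: a bus holds at most 63 nodes, each node can contribute up to 4 self-ID quadlets, and the topology map carries a 3-quadlet header, hence 63 * 4 + 3 = 255 entries. Spelled out with illustrative (non-kernel) names:

	#define DEMO_MAX_NODES			63	/* IEEE 1394 node limit      */
	#define DEMO_SELF_IDS_PER_NODE		4	/* self-ID quadlets per node */
	#define DEMO_TOPOLOGY_MAP_HEADER	3	/* map header words (per the comment above) */

	/* 63 * 4 + 3 == 255, matching u32 topology_map[252 + 3]. */
	#define DEMO_TOPOLOGY_MAP_SIZE \
		(DEMO_MAX_NODES * DEMO_SELF_IDS_PER_NODE + DEMO_TOPOLOGY_MAP_HEADER)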
@@ -318,12 +320,14 @@ struct fw_card {
 struct fw_card *fw_card_get(struct fw_card *card);
 void fw_card_put(struct fw_card *card);

-/* The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointer to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically). */
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointer to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frame automatically).
+ */
 struct fw_iso_packet {
 	u16 payload_length;	/* Length of indirect payload. */
@@ -352,11 +356,13 @@ typedef void (*fw_iso_callback_t) (struct fw_iso_context *context,
				    void *header,
				    void *data);

-/* An iso buffer is just a set of pages mapped for DMA in the
- * specified direction. Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space. We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma. */
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
 struct fw_iso_buffer {
 	enum dma_data_direction direction;
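The comment describes the trick used for iso buffers: pages are allocated without ever needing a kernel virtual mapping for the data path, each page's bus address is stashed via the page private field, and a separate helper maps the pages into user space on demand. A rough sketch of handling one page that way, assuming a generic struct device is at hand; this is not the actual fw_iso_buffer_init() code.

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Map a freshly allocated page for DMA and remember the bus address
	 * in the page itself, as the comment above describes. */
	static struct page *demo_alloc_iso_page(struct device *dev,
						enum dma_data_direction direction)
	{
		struct page *page;
		dma_addr_t bus;

		page = alloc_page(GFP_KERNEL);
		if (page == NULL)
			return NULL;

		bus = dma_map_page(dev, page, 0, PAGE_SIZE, direction);
		if (dma_mapping_error(dev, bus)) {
			__free_page(page);
			return NULL;
		}

		set_page_private(page, bus);	/* retrieve later via page_private() */
		return page;
	}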
@@ -408,18 +414,22 @@ fw_iso_context_stop(struct fw_iso_context *ctx);
 struct fw_card_driver {
 	const char *name;

-	/* Enable the given card with the given initial config rom.
-	 * This function is expected to activate the card, and either
-	 * enable the PHY or set the link_on bit and initiate a bus
-	 * reset. */
+	/*
+	 * Enable the given card with the given initial config rom.
+	 * This function is expected to activate the card, and either
+	 * enable the PHY or set the link_on bit and initiate a bus
+	 * reset.
+	 */
 	int (*enable) (struct fw_card *card, u32 *config_rom, size_t length);

 	int (*update_phy_reg) (struct fw_card *card, int address,
 			       int clear_bits, int set_bits);

-	/* Update the config rom for an enabled card. This function
-	 * should change the config rom that is presented on the bus
-	 * an initiate a bus reset. */
+	/*
+	 * Update the config rom for an enabled card. This function
+	 * should change the config rom that is presented on the bus
+	 * an initiate a bus reset.
+	 */
 	int (*set_config_rom) (struct fw_card *card,
 			       u32 *config_rom, size_t length);
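Put together, a controller driver fills in a struct fw_card_driver with these hooks. The signatures below are the ones shown in this header; the empty bodies and the "demo" controller are placeholders, and a real driver would also supply the remaining ops (packet transmission, cancel_packet, enable_phys_dma, the iso context ops) not repeated here.

	static int demo_enable(struct fw_card *card, u32 *config_rom, size_t length)
	{
		/* Activate the controller, then enable the PHY or set link_on
		 * and trigger a bus reset, as the comment above requires. */
		return 0;
	}

	static int demo_update_phy_reg(struct fw_card *card, int address,
				       int clear_bits, int set_bits)
	{
		return 0;
	}

	static int demo_set_config_rom(struct fw_card *card,
				       u32 *config_rom, size_t length)
	{
		/* Swap in the new config rom and initiate a bus reset. */
		return 0;
	}

	static const struct fw_card_driver demo_card_driver = {
		.name		= "demo",
		.enable		= demo_enable,
		.update_phy_reg	= demo_update_phy_reg,
		.set_config_rom	= demo_set_config_rom,
		/* .cancel_packet, .enable_phys_dma and the other ops omitted */
	};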
@@ -428,12 +438,14 @@ struct fw_card_driver {
 	/* Calling cancel is valid once a packet has been submitted. */
 	int (*cancel_packet) (struct fw_card *card, struct fw_packet *packet);

-	/* Allow the specified node ID to do direct DMA out and in of
-	 * host memory. The card will disable this for all node when
-	 * a bus reset happens, so driver need to reenable this after
-	 * bus reset. Returns 0 on success, -ENODEV if the card
-	 * doesn't support this, -ESTALE if the generation doesn't
-	 * match. */
+	/*
+	 * Allow the specified node ID to do direct DMA out and in of
+	 * host memory. The card will disable this for all node when
+	 * a bus reset happens, so driver need to reenable this after
+	 * bus reset. Returns 0 on success, -ENODEV if the card
+	 * doesn't support this, -ESTALE if the generation doesn't
+	 * match.
+	 */
 	int (*enable_phys_dma) (struct fw_card *card,
 				int node_id, int generation);
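The return-code contract in that comment (0, -ENODEV, -ESTALE) is what callers key off after every bus reset: the hardware drops the permission for every node, so the upper layer has to re-enable physical DMA with the current generation and be prepared for that generation to already be stale. A hedged sketch of such a caller; card->driver is the ops pointer used elsewhere in this patch, the helper itself is hypothetical.

	#include <linux/errno.h>

	/* Re-arm physical DMA for one node after a bus reset.  -ESTALE just
	 * means another reset happened; retry with the new generation once
	 * it is known. */
	static int demo_reenable_phys_dma(struct fw_card *card,
					  int node_id, int generation)
	{
		int ret;

		ret = card->driver->enable_phys_dma(card, node_id, generation);
		if (ret == -ENODEV)
			return 0;	/* controller can't do it; nothing to redo */
		return ret;
	}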
@@ -473,15 +485,15 @@ void fw_flush_transactions(struct fw_card *card);
 void fw_send_phy_config(struct fw_card *card,
 			int node_id, int generation, int gap_count);

-/* Called by the topology code to inform the device code of node
- * activity; found, lost, or updated nodes */
+/*
+ * Called by the topology code to inform the device code of node
+ * activity; found, lost, or updated nodes.
+ */
 void
 fw_node_event(struct fw_card *card, struct fw_node *node, int event);

 /* API used by card level drivers */

-/* Do we need phy speed here also? If we add more args, maybe we
-   should go back to struct fw_card_info. */
 void
 fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
 		   struct device *device);
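For a card-level driver, fw_card_initialize() is the first step of bringing a controller up: it binds the card structure to the ops table and the struct device before the card is registered and enabled. A hypothetical probe fragment, reusing the demo_card_driver ops table sketched above; the private-data struct and error handling are illustrative, and the later registration/enable step is deliberately not shown here.

	#include <linux/slab.h>

	static int demo_probe(struct device *device)
	{
		struct demo_card {		/* hypothetical driver private data */
			struct fw_card card;
		} *demo;

		demo = kzalloc(sizeof(*demo), GFP_KERNEL);
		if (demo == NULL)
			return -ENOMEM;

		fw_card_initialize(&demo->card, &demo_card_driver, device);

		/* ...registration and driver->enable() follow, not shown here. */
		return 0;
	}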