Index: projects/release-pkg/contrib/binutils/bfd/elf.c
===================================================================
--- projects/release-pkg/contrib/binutils/bfd/elf.c (revision 295925)
+++ projects/release-pkg/contrib/binutils/bfd/elf.c (revision 295926)
@@ -1,9246 +1,9246 @@
/* ELF executable support for BFD.
   Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

/* $FreeBSD$ */

/*
SECTION
	ELF backends

	BFD support for ELF formats is being worked on.
	Currently, the best supported back ends are for sparc and i386
	(running svr4 or Solaris 2).

	Documentation of the internals of the support code still needs
	to be written.  The code is changing quickly enough that we
	haven't bothered yet.  */

/* For sparc64-cross-sparc32.  */
#define _SYSCALL32
#include "sysdep.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#define ARCH_SIZE 0
#include "elf-bfd.h"
#include "libiberty.h"

static int elf_sort_sections (const void *, const void *);
static bfd_boolean assign_file_positions_except_relocs (bfd *, struct bfd_link_info *);
static bfd_boolean prep_headers (bfd *);
static bfd_boolean swap_out_syms (bfd *, struct bfd_strtab_hash **, int) ;
static bfd_boolean elfcore_read_notes (bfd *, file_ptr, bfd_size_type) ;

/* Swap version information in and out.  The version information is
   currently size independent.  If that ever changes, this code will
   need to move into elfcode.h.  */

/* Swap in a Verdef structure.  */

void
_bfd_elf_swap_verdef_in (bfd *abfd,
			 const Elf_External_Verdef *src,
			 Elf_Internal_Verdef *dst)
{
  dst->vd_version = H_GET_16 (abfd, src->vd_version);
  dst->vd_flags = H_GET_16 (abfd, src->vd_flags);
  dst->vd_ndx = H_GET_16 (abfd, src->vd_ndx);
  dst->vd_cnt = H_GET_16 (abfd, src->vd_cnt);
  dst->vd_hash = H_GET_32 (abfd, src->vd_hash);
  dst->vd_aux = H_GET_32 (abfd, src->vd_aux);
  dst->vd_next = H_GET_32 (abfd, src->vd_next);
}

/* Swap out a Verdef structure.  */

void
_bfd_elf_swap_verdef_out (bfd *abfd,
			  const Elf_Internal_Verdef *src,
			  Elf_External_Verdef *dst)
{
  H_PUT_16 (abfd, src->vd_version, dst->vd_version);
  H_PUT_16 (abfd, src->vd_flags, dst->vd_flags);
  H_PUT_16 (abfd, src->vd_ndx, dst->vd_ndx);
  H_PUT_16 (abfd, src->vd_cnt, dst->vd_cnt);
  H_PUT_32 (abfd, src->vd_hash, dst->vd_hash);
  H_PUT_32 (abfd, src->vd_aux, dst->vd_aux);
  H_PUT_32 (abfd, src->vd_next, dst->vd_next);
}

/* Swap in a Verdaux structure.  */

void
_bfd_elf_swap_verdaux_in (bfd *abfd,
			  const Elf_External_Verdaux *src,
			  Elf_Internal_Verdaux *dst)
{
  dst->vda_name = H_GET_32 (abfd, src->vda_name);
  dst->vda_next = H_GET_32 (abfd, src->vda_next);
}

/* Swap out a Verdaux structure.
*/ void _bfd_elf_swap_verdaux_out (bfd *abfd, const Elf_Internal_Verdaux *src, Elf_External_Verdaux *dst) { H_PUT_32 (abfd, src->vda_name, dst->vda_name); H_PUT_32 (abfd, src->vda_next, dst->vda_next); } /* Swap in a Verneed structure. */ void _bfd_elf_swap_verneed_in (bfd *abfd, const Elf_External_Verneed *src, Elf_Internal_Verneed *dst) { dst->vn_version = H_GET_16 (abfd, src->vn_version); dst->vn_cnt = H_GET_16 (abfd, src->vn_cnt); dst->vn_file = H_GET_32 (abfd, src->vn_file); dst->vn_aux = H_GET_32 (abfd, src->vn_aux); dst->vn_next = H_GET_32 (abfd, src->vn_next); } /* Swap out a Verneed structure. */ void _bfd_elf_swap_verneed_out (bfd *abfd, const Elf_Internal_Verneed *src, Elf_External_Verneed *dst) { H_PUT_16 (abfd, src->vn_version, dst->vn_version); H_PUT_16 (abfd, src->vn_cnt, dst->vn_cnt); H_PUT_32 (abfd, src->vn_file, dst->vn_file); H_PUT_32 (abfd, src->vn_aux, dst->vn_aux); H_PUT_32 (abfd, src->vn_next, dst->vn_next); } /* Swap in a Vernaux structure. */ void _bfd_elf_swap_vernaux_in (bfd *abfd, const Elf_External_Vernaux *src, Elf_Internal_Vernaux *dst) { dst->vna_hash = H_GET_32 (abfd, src->vna_hash); dst->vna_flags = H_GET_16 (abfd, src->vna_flags); dst->vna_other = H_GET_16 (abfd, src->vna_other); dst->vna_name = H_GET_32 (abfd, src->vna_name); dst->vna_next = H_GET_32 (abfd, src->vna_next); } /* Swap out a Vernaux structure. */ void _bfd_elf_swap_vernaux_out (bfd *abfd, const Elf_Internal_Vernaux *src, Elf_External_Vernaux *dst) { H_PUT_32 (abfd, src->vna_hash, dst->vna_hash); H_PUT_16 (abfd, src->vna_flags, dst->vna_flags); H_PUT_16 (abfd, src->vna_other, dst->vna_other); H_PUT_32 (abfd, src->vna_name, dst->vna_name); H_PUT_32 (abfd, src->vna_next, dst->vna_next); } /* Swap in a Versym structure. */ void _bfd_elf_swap_versym_in (bfd *abfd, const Elf_External_Versym *src, Elf_Internal_Versym *dst) { dst->vs_vers = H_GET_16 (abfd, src->vs_vers); } /* Swap out a Versym structure. */ void _bfd_elf_swap_versym_out (bfd *abfd, const Elf_Internal_Versym *src, Elf_External_Versym *dst) { H_PUT_16 (abfd, src->vs_vers, dst->vs_vers); } /* Standard ELF hash function. Do not change this function; you will cause invalid hash tables to be generated. */ unsigned long bfd_elf_hash (const char *namearg) { const unsigned char *name = (const unsigned char *) namearg; unsigned long h = 0; unsigned long g; int ch; while ((ch = *name++) != '\0') { h = (h << 4) + ch; if ((g = (h & 0xf0000000)) != 0) { h ^= g >> 24; /* The ELF ABI says `h &= ~g', but this is equivalent in this case and on some machines one insn instead of two. */ h ^= g; } } return h & 0xffffffff; } /* DT_GNU_HASH hash function. Do not change this function; you will cause invalid hash tables to be generated. */ unsigned long bfd_elf_gnu_hash (const char *namearg) { const unsigned char *name = (const unsigned char *) namearg; unsigned long h = 5381; unsigned char ch; while ((ch = *name++) != '\0') h = (h << 5) + h + ch; return h & 0xffffffff; } bfd_boolean bfd_elf_mkobject (bfd *abfd) { if (abfd->tdata.any == NULL) { abfd->tdata.any = bfd_zalloc (abfd, sizeof (struct elf_obj_tdata)); if (abfd->tdata.any == NULL) return FALSE; } elf_tdata (abfd)->program_header_size = (bfd_size_type) -1; return TRUE; } bfd_boolean bfd_elf_mkcorefile (bfd *abfd) { /* I think this can be done just like an object file. 
*/ return bfd_elf_mkobject (abfd); } char * bfd_elf_get_str_section (bfd *abfd, unsigned int shindex) { Elf_Internal_Shdr **i_shdrp; bfd_byte *shstrtab = NULL; file_ptr offset; bfd_size_type shstrtabsize; i_shdrp = elf_elfsections (abfd); if (i_shdrp == 0 || shindex >= elf_numsections (abfd) || i_shdrp[shindex] == 0) return NULL; shstrtab = i_shdrp[shindex]->contents; if (shstrtab == NULL) { /* No cached one, attempt to read, and cache what we read. */ offset = i_shdrp[shindex]->sh_offset; shstrtabsize = i_shdrp[shindex]->sh_size; /* Allocate and clear an extra byte at the end, to prevent crashes in case the string table is not terminated. */ if (shstrtabsize + 1 == 0 || (shstrtab = bfd_alloc (abfd, shstrtabsize + 1)) == NULL || bfd_seek (abfd, offset, SEEK_SET) != 0) shstrtab = NULL; else if (bfd_bread (shstrtab, shstrtabsize, abfd) != shstrtabsize) { if (bfd_get_error () != bfd_error_system_call) bfd_set_error (bfd_error_file_truncated); shstrtab = NULL; } else shstrtab[shstrtabsize] = '\0'; i_shdrp[shindex]->contents = shstrtab; } return (char *) shstrtab; } char * bfd_elf_string_from_elf_section (bfd *abfd, unsigned int shindex, unsigned int strindex) { Elf_Internal_Shdr *hdr; if (strindex == 0) return ""; if (elf_elfsections (abfd) == NULL || shindex >= elf_numsections (abfd)) return NULL; hdr = elf_elfsections (abfd)[shindex]; if (hdr->contents == NULL && bfd_elf_get_str_section (abfd, shindex) == NULL) return NULL; if (strindex >= hdr->sh_size) { unsigned int shstrndx = elf_elfheader(abfd)->e_shstrndx; (*_bfd_error_handler) (_("%B: invalid string offset %u >= %lu for section `%s'"), abfd, strindex, (unsigned long) hdr->sh_size, (shindex == shstrndx && strindex == hdr->sh_name ? ".shstrtab" : bfd_elf_string_from_elf_section (abfd, shstrndx, hdr->sh_name))); return ""; } return ((char *) hdr->contents) + strindex; } /* Read and convert symbols to internal format. SYMCOUNT specifies the number of symbols to read, starting from symbol SYMOFFSET. If any of INTSYM_BUF, EXTSYM_BUF or EXTSHNDX_BUF are non-NULL, they are used to store the internal symbols, external symbols, and symbol section index extensions, respectively. */ Elf_Internal_Sym * bfd_elf_get_elf_syms (bfd *ibfd, Elf_Internal_Shdr *symtab_hdr, size_t symcount, size_t symoffset, Elf_Internal_Sym *intsym_buf, void *extsym_buf, Elf_External_Sym_Shndx *extshndx_buf) { Elf_Internal_Shdr *shndx_hdr; void *alloc_ext; const bfd_byte *esym; Elf_External_Sym_Shndx *alloc_extshndx; Elf_External_Sym_Shndx *shndx; Elf_Internal_Sym *isym; Elf_Internal_Sym *isymend; const struct elf_backend_data *bed; size_t extsym_size; bfd_size_type amt; file_ptr pos; if (symcount == 0) return intsym_buf; /* Normal syms might have section extension entries. */ shndx_hdr = NULL; if (symtab_hdr == &elf_tdata (ibfd)->symtab_hdr) shndx_hdr = &elf_tdata (ibfd)->symtab_shndx_hdr; /* Read the symbols. 
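   External symbol records, and SHT_SYMTAB_SHNDX entries when present, are
   read into the caller-supplied buffers if any were given, otherwise into
   temporary allocations that are freed again before returning.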
*/ alloc_ext = NULL; alloc_extshndx = NULL; bed = get_elf_backend_data (ibfd); extsym_size = bed->s->sizeof_sym; amt = symcount * extsym_size; pos = symtab_hdr->sh_offset + symoffset * extsym_size; if (extsym_buf == NULL) { alloc_ext = bfd_malloc2 (symcount, extsym_size); extsym_buf = alloc_ext; } if (extsym_buf == NULL || bfd_seek (ibfd, pos, SEEK_SET) != 0 || bfd_bread (extsym_buf, amt, ibfd) != amt) { intsym_buf = NULL; goto out; } if (shndx_hdr == NULL || shndx_hdr->sh_size == 0) extshndx_buf = NULL; else { amt = symcount * sizeof (Elf_External_Sym_Shndx); pos = shndx_hdr->sh_offset + symoffset * sizeof (Elf_External_Sym_Shndx); if (extshndx_buf == NULL) { alloc_extshndx = bfd_malloc2 (symcount, sizeof (Elf_External_Sym_Shndx)); extshndx_buf = alloc_extshndx; } if (extshndx_buf == NULL || bfd_seek (ibfd, pos, SEEK_SET) != 0 || bfd_bread (extshndx_buf, amt, ibfd) != amt) { intsym_buf = NULL; goto out; } } if (intsym_buf == NULL) { intsym_buf = bfd_malloc2 (symcount, sizeof (Elf_Internal_Sym)); if (intsym_buf == NULL) goto out; } /* Convert the symbols to internal form. */ isymend = intsym_buf + symcount; for (esym = extsym_buf, isym = intsym_buf, shndx = extshndx_buf; isym < isymend; esym += extsym_size, isym++, shndx = shndx != NULL ? shndx + 1 : NULL) if (!(*bed->s->swap_symbol_in) (ibfd, esym, shndx, isym)) { symoffset += (esym - (bfd_byte *) extsym_buf) / extsym_size; (*_bfd_error_handler) (_("%B symbol number %lu references " "nonexistent SHT_SYMTAB_SHNDX section"), ibfd, (unsigned long) symoffset); intsym_buf = NULL; goto out; } out: if (alloc_ext != NULL) free (alloc_ext); if (alloc_extshndx != NULL) free (alloc_extshndx); return intsym_buf; } /* Look up a symbol name. */ const char * bfd_elf_sym_name (bfd *abfd, Elf_Internal_Shdr *symtab_hdr, Elf_Internal_Sym *isym, asection *sym_sec) { const char *name; unsigned int iname = isym->st_name; unsigned int shindex = symtab_hdr->sh_link; if (iname == 0 && ELF_ST_TYPE (isym->st_info) == STT_SECTION /* Check for a bogus st_shndx to avoid crashing. */ && isym->st_shndx < elf_numsections (abfd) && !(isym->st_shndx >= SHN_LORESERVE && isym->st_shndx <= SHN_HIRESERVE)) { iname = elf_elfsections (abfd)[isym->st_shndx]->sh_name; shindex = elf_elfheader (abfd)->e_shstrndx; } name = bfd_elf_string_from_elf_section (abfd, shindex, iname); if (name == NULL) name = "(null)"; else if (sym_sec && *name == '\0') name = bfd_section_name (abfd, sym_sec); return name; } /* Elf_Internal_Shdr->contents is an array of these for SHT_GROUP sections. The first element is the flags, the rest are section pointers. */ typedef union elf_internal_group { Elf_Internal_Shdr *shdr; unsigned int flags; } Elf_Internal_Group; /* Return the name of the group signature symbol. Why isn't the signature just a string? */ static const char * group_signature (bfd *abfd, Elf_Internal_Shdr *ghdr) { Elf_Internal_Shdr *hdr; unsigned char esym[sizeof (Elf64_External_Sym)]; Elf_External_Sym_Shndx eshndx; Elf_Internal_Sym isym; /* First we need to ensure the symbol table is available. Make sure that it is a symbol table section. */ hdr = elf_elfsections (abfd) [ghdr->sh_link]; if (hdr->sh_type != SHT_SYMTAB || ! bfd_section_from_shdr (abfd, ghdr->sh_link)) return NULL; /* Go read the symbol. */ hdr = &elf_tdata (abfd)->symtab_hdr; if (bfd_elf_get_elf_syms (abfd, hdr, 1, ghdr->sh_info, &isym, esym, &eshndx) == NULL) return NULL; return bfd_elf_sym_name (abfd, hdr, &isym, NULL); } /* Set next_in_group list pointer, and group name for NEWSECT. 
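   On the first call this also scans the file for SHT_GROUP sections, caches
   their headers in elf_tdata, and converts each group's contents from a flag
   word plus section indices into a flag word plus section header pointers.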
*/ static bfd_boolean setup_group (bfd *abfd, Elf_Internal_Shdr *hdr, asection *newsect) { unsigned int num_group = elf_tdata (abfd)->num_group; /* If num_group is zero, read in all SHT_GROUP sections. The count is set to -1 if there are no SHT_GROUP sections. */ if (num_group == 0) { unsigned int i, shnum; /* First count the number of groups. If we have a SHT_GROUP section with just a flag word (ie. sh_size is 4), ignore it. */ shnum = elf_numsections (abfd); num_group = 0; #define IS_VALID_GROUP_SECTION_HEADER(shdr) \ ( (shdr)->sh_type == SHT_GROUP \ && (shdr)->sh_size >= (2 * GRP_ENTRY_SIZE) \ && (shdr)->sh_entsize == GRP_ENTRY_SIZE \ && ((shdr)->sh_size % GRP_ENTRY_SIZE) == 0) for (i = 0; i < shnum; i++) { Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[i]; if (IS_VALID_GROUP_SECTION_HEADER (shdr)) num_group += 1; } if (num_group == 0) { num_group = (unsigned) -1; elf_tdata (abfd)->num_group = num_group; } else { /* We keep a list of elf section headers for group sections, so we can find them quickly. */ bfd_size_type amt; elf_tdata (abfd)->num_group = num_group; elf_tdata (abfd)->group_sect_ptr = bfd_alloc2 (abfd, num_group, sizeof (Elf_Internal_Shdr *)); if (elf_tdata (abfd)->group_sect_ptr == NULL) return FALSE; num_group = 0; for (i = 0; i < shnum; i++) { Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[i]; if (IS_VALID_GROUP_SECTION_HEADER (shdr)) { unsigned char *src; Elf_Internal_Group *dest; /* Add to list of sections. */ elf_tdata (abfd)->group_sect_ptr[num_group] = shdr; num_group += 1; /* Read the raw contents. */ BFD_ASSERT (sizeof (*dest) >= 4); amt = shdr->sh_size * sizeof (*dest) / 4; shdr->contents = bfd_alloc2 (abfd, shdr->sh_size, sizeof (*dest) / 4); /* PR binutils/4110: Handle corrupt group headers. */ if (shdr->contents == NULL) { _bfd_error_handler (_("%B: Corrupt size field in group section header: 0x%lx"), abfd, shdr->sh_size); bfd_set_error (bfd_error_bad_value); return FALSE; } memset (shdr->contents, 0, amt); if (bfd_seek (abfd, shdr->sh_offset, SEEK_SET) != 0 || (bfd_bread (shdr->contents, shdr->sh_size, abfd) != shdr->sh_size)) return FALSE; /* Translate raw contents, a flag word followed by an array of elf section indices all in target byte order, to the flag word followed by an array of elf section pointers. */ src = shdr->contents + shdr->sh_size; dest = (Elf_Internal_Group *) (shdr->contents + amt); while (1) { unsigned int idx; src -= 4; --dest; idx = H_GET_32 (abfd, src); if (src == shdr->contents) { dest->flags = idx; if (shdr->bfd_section != NULL && (idx & GRP_COMDAT)) shdr->bfd_section->flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_DISCARD; break; } if (idx >= shnum) { ((*_bfd_error_handler) (_("%B: invalid SHT_GROUP entry"), abfd)); idx = 0; } dest->shdr = elf_elfsections (abfd)[idx]; } } } } } if (num_group != (unsigned) -1) { unsigned int i; for (i = 0; i < num_group; i++) { Elf_Internal_Shdr *shdr = elf_tdata (abfd)->group_sect_ptr[i]; Elf_Internal_Group *idx = (Elf_Internal_Group *) shdr->contents; unsigned int n_elt = shdr->sh_size / 4; /* Look through this group's sections to see if current section is a member. */ while (--n_elt != 0) if ((++idx)->shdr == hdr) { asection *s = NULL; /* We are a member of this group. Go looking through other members to see if any others are linked via next_in_group. 
*/ idx = (Elf_Internal_Group *) shdr->contents; n_elt = shdr->sh_size / 4; while (--n_elt != 0) if ((s = (++idx)->shdr->bfd_section) != NULL && elf_next_in_group (s) != NULL) break; if (n_elt != 0) { /* Snarf the group name from other member, and insert current section in circular list. */ elf_group_name (newsect) = elf_group_name (s); elf_next_in_group (newsect) = elf_next_in_group (s); elf_next_in_group (s) = newsect; } else { const char *gname; gname = group_signature (abfd, shdr); if (gname == NULL) return FALSE; elf_group_name (newsect) = gname; /* Start a circular list with one element. */ elf_next_in_group (newsect) = newsect; } /* If the group section has been created, point to the new member. */ if (shdr->bfd_section != NULL) elf_next_in_group (shdr->bfd_section) = newsect; i = num_group - 1; break; } } } if (elf_group_name (newsect) == NULL) { (*_bfd_error_handler) (_("%B: no group info for section %A"), abfd, newsect); } return TRUE; } bfd_boolean _bfd_elf_setup_sections (bfd *abfd) { unsigned int i; unsigned int num_group = elf_tdata (abfd)->num_group; bfd_boolean result = TRUE; asection *s; /* Process SHF_LINK_ORDER. */ for (s = abfd->sections; s != NULL; s = s->next) { Elf_Internal_Shdr *this_hdr = &elf_section_data (s)->this_hdr; if ((this_hdr->sh_flags & SHF_LINK_ORDER) != 0) { unsigned int elfsec = this_hdr->sh_link; /* FIXME: The old Intel compiler and old strip/objcopy may not set the sh_link or sh_info fields. Hence we could get the situation where elfsec is 0. */ if (elfsec == 0) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (bed->link_order_error_handler) bed->link_order_error_handler (_("%B: warning: sh_link not set for section `%A'"), abfd, s); } else { asection *link; this_hdr = elf_elfsections (abfd)[elfsec]; /* PR 1991, 2008: Some strip/objcopy may leave an incorrect value in sh_link. We don't want to proceed. */ link = this_hdr->bfd_section; if (link == NULL) { (*_bfd_error_handler) (_("%B: sh_link [%d] in section `%A' is incorrect"), s->owner, s, elfsec); result = FALSE; } elf_linked_to_section (s) = link; } } } /* Process section groups. */ if (num_group == (unsigned) -1) return result; for (i = 0; i < num_group; i++) { Elf_Internal_Shdr *shdr = elf_tdata (abfd)->group_sect_ptr[i]; Elf_Internal_Group *idx = (Elf_Internal_Group *) shdr->contents; unsigned int n_elt = shdr->sh_size / 4; while (--n_elt != 0) if ((++idx)->shdr->bfd_section) elf_sec_group (idx->shdr->bfd_section) = shdr->bfd_section; else if (idx->shdr->sh_type == SHT_RELA || idx->shdr->sh_type == SHT_REL) /* We won't include relocation sections in section groups in output object files. We adjust the group section size here so that relocatable link will work correctly when relocation sections are in section group in input object files. */ shdr->bfd_section->size -= 4; else { /* There are some unknown sections in the group. */ (*_bfd_error_handler) (_("%B: unknown [%d] section `%s' in group [%s]"), abfd, (unsigned int) idx->shdr->sh_type, bfd_elf_string_from_elf_section (abfd, (elf_elfheader (abfd) ->e_shstrndx), idx->shdr->sh_name), shdr->bfd_section->name); result = FALSE; } } return result; } bfd_boolean bfd_elf_is_group_section (bfd *abfd ATTRIBUTE_UNUSED, const asection *sec) { return elf_next_in_group (sec) != NULL; } /* Make a BFD section from an ELF section. We store a pointer to the BFD section in the bfd_section field of the header. 
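   The BFD section flags are derived from sh_type and sh_flags, SHF_GROUP
   members are linked into their group, and debugging sections are
   recognised by name.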
*/ bfd_boolean _bfd_elf_make_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, const char *name, int shindex) { asection *newsect; flagword flags; const struct elf_backend_data *bed; if (hdr->bfd_section != NULL) { BFD_ASSERT (strcmp (name, bfd_get_section_name (abfd, hdr->bfd_section)) == 0); return TRUE; } newsect = bfd_make_section_anyway (abfd, name); if (newsect == NULL) return FALSE; hdr->bfd_section = newsect; elf_section_data (newsect)->this_hdr = *hdr; elf_section_data (newsect)->this_idx = shindex; /* Always use the real type/flags. */ elf_section_type (newsect) = hdr->sh_type; elf_section_flags (newsect) = hdr->sh_flags; newsect->filepos = hdr->sh_offset; if (! bfd_set_section_vma (abfd, newsect, hdr->sh_addr) || ! bfd_set_section_size (abfd, newsect, hdr->sh_size) || ! bfd_set_section_alignment (abfd, newsect, bfd_log2 ((bfd_vma) hdr->sh_addralign))) return FALSE; flags = SEC_NO_FLAGS; if (hdr->sh_type != SHT_NOBITS) flags |= SEC_HAS_CONTENTS; if (hdr->sh_type == SHT_GROUP) flags |= SEC_GROUP | SEC_EXCLUDE; if ((hdr->sh_flags & SHF_ALLOC) != 0) { flags |= SEC_ALLOC; if (hdr->sh_type != SHT_NOBITS) flags |= SEC_LOAD; } if ((hdr->sh_flags & SHF_WRITE) == 0) flags |= SEC_READONLY; if ((hdr->sh_flags & SHF_EXECINSTR) != 0) flags |= SEC_CODE; else if ((flags & SEC_LOAD) != 0) flags |= SEC_DATA; if ((hdr->sh_flags & SHF_MERGE) != 0) { flags |= SEC_MERGE; newsect->entsize = hdr->sh_entsize; if ((hdr->sh_flags & SHF_STRINGS) != 0) flags |= SEC_STRINGS; } if (hdr->sh_flags & SHF_GROUP) if (!setup_group (abfd, hdr, newsect)) return FALSE; if ((hdr->sh_flags & SHF_TLS) != 0) flags |= SEC_THREAD_LOCAL; if ((flags & SEC_ALLOC) == 0) { /* The debugging sections appear to be recognized only by name, not any sort of flag. Their SEC_ALLOC bits are cleared. */ static const struct { const char *name; int len; } debug_sections [] = { { STRING_COMMA_LEN ("debug") }, /* 'd' */ { NULL, 0 }, /* 'e' */ { NULL, 0 }, /* 'f' */ { STRING_COMMA_LEN ("gnu.linkonce.wi.") }, /* 'g' */ { NULL, 0 }, /* 'h' */ { NULL, 0 }, /* 'i' */ { NULL, 0 }, /* 'j' */ { NULL, 0 }, /* 'k' */ { STRING_COMMA_LEN ("line") }, /* 'l' */ { NULL, 0 }, /* 'm' */ { NULL, 0 }, /* 'n' */ { NULL, 0 }, /* 'o' */ { NULL, 0 }, /* 'p' */ { NULL, 0 }, /* 'q' */ { NULL, 0 }, /* 'r' */ { STRING_COMMA_LEN ("stab") } /* 's' */ }; if (name [0] == '.') { int i = name [1] - 'd'; if (i >= 0 && i < (int) ARRAY_SIZE (debug_sections) && debug_sections [i].name != NULL && strncmp (&name [1], debug_sections [i].name, debug_sections [i].len) == 0) flags |= SEC_DEBUGGING; } } /* As a GNU extension, if the name begins with .gnu.linkonce, we only link a single copy of the section. This is used to support g++. g++ will emit each template expansion in its own section. The symbols will be defined as weak, so that multiple definitions are permitted. The GNU linker extension is to actually discard all but one of the sections. */ if (CONST_STRNEQ (name, ".gnu.linkonce") && elf_next_in_group (newsect) == NULL) flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_DISCARD; bed = get_elf_backend_data (abfd); if (bed->elf_backend_section_flags) if (! bed->elf_backend_section_flags (&flags, hdr)) return FALSE; if (! bfd_set_section_flags (abfd, newsect, flags)) return FALSE; if ((flags & SEC_ALLOC) != 0) { Elf_Internal_Phdr *phdr; unsigned int i; /* Look through the phdrs to see if we need to adjust the lma. If all the p_paddr fields are zero, we ignore them, since some ELF linkers produce such output. 
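   Otherwise the LMA of an allocated section is computed from the p_paddr
   of the PT_LOAD segment that contains it.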
*/ phdr = elf_tdata (abfd)->phdr; for (i = 0; i < elf_elfheader (abfd)->e_phnum; i++, phdr++) { if (phdr->p_paddr != 0) break; } if (i < elf_elfheader (abfd)->e_phnum) { phdr = elf_tdata (abfd)->phdr; for (i = 0; i < elf_elfheader (abfd)->e_phnum; i++, phdr++) { /* This section is part of this segment if its file offset plus size lies within the segment's memory span and, if the section is loaded, the extent of the loaded data lies within the extent of the segment. Note - we used to check the p_paddr field as well, and refuse to set the LMA if it was 0. This is wrong though, as a perfectly valid initialised segment can have a p_paddr of zero. Some architectures, eg ARM, place special significance on the address 0 and executables need to be able to have a segment which covers this address. */ if (phdr->p_type == PT_LOAD && (bfd_vma) hdr->sh_offset >= phdr->p_offset && (hdr->sh_offset + hdr->sh_size <= phdr->p_offset + phdr->p_memsz) && ((flags & SEC_LOAD) == 0 || (hdr->sh_offset + hdr->sh_size <= phdr->p_offset + phdr->p_filesz))) { if ((flags & SEC_LOAD) == 0) newsect->lma = (phdr->p_paddr + hdr->sh_addr - phdr->p_vaddr); else /* We used to use the same adjustment for SEC_LOAD sections, but that doesn't work if the segment is packed with code from multiple VMAs. Instead we calculate the section LMA based on the segment LMA. It is assumed that the segment will contain sections with contiguous LMAs, even if the VMAs are not. */ newsect->lma = (phdr->p_paddr + hdr->sh_offset - phdr->p_offset); /* With contiguous segments, we can't tell from file offsets whether a section with zero size should be placed at the end of one segment or the beginning of the next. Decide based on vaddr. */ if (hdr->sh_addr >= phdr->p_vaddr && (hdr->sh_addr + hdr->sh_size <= phdr->p_vaddr + phdr->p_memsz)) break; } } } } return TRUE; } /* INTERNAL_FUNCTION bfd_elf_find_section SYNOPSIS struct elf_internal_shdr *bfd_elf_find_section (bfd *abfd, char *name); DESCRIPTION Helper functions for GDB to locate the string tables. Since BFD hides string tables from callers, GDB needs to use an internal hook to find them. Sun's .stabstr, in particular, isn't even pointed to by the .stab section, so ordinary mechanisms wouldn't work to find it, even if we had some. */ struct elf_internal_shdr * bfd_elf_find_section (bfd *abfd, char *name) { Elf_Internal_Shdr **i_shdrp; char *shstrtab; unsigned int max; unsigned int i; i_shdrp = elf_elfsections (abfd); if (i_shdrp != NULL) { shstrtab = bfd_elf_get_str_section (abfd, elf_elfheader (abfd)->e_shstrndx); if (shstrtab != NULL) { max = elf_numsections (abfd); for (i = 1; i < max; i++) if (!strcmp (&shstrtab[i_shdrp[i]->sh_name], name)) return i_shdrp[i]; } } return 0; } const char *const bfd_elf_section_type_names[] = { "SHT_NULL", "SHT_PROGBITS", "SHT_SYMTAB", "SHT_STRTAB", "SHT_RELA", "SHT_HASH", "SHT_DYNAMIC", "SHT_NOTE", "SHT_NOBITS", "SHT_REL", "SHT_SHLIB", "SHT_DYNSYM", }; /* ELF relocs are against symbols. If we are producing relocatable output, and the reloc is against an external symbol, and nothing has given us any additional addend, the resulting reloc will also be against the same symbol. In such a case, we don't want to change anything about the way the reloc is handled, since it will all be done at final link time. Rather than put special case code into bfd_perform_relocation, all the reloc types use this howto function. It just short circuits the reloc if producing relocatable output against an external symbol. 
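   In that case the reloc address is simply adjusted by the output section
   offset and bfd_reloc_ok is returned; all other relocs get
   bfd_reloc_continue so that normal processing takes place.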
*/ bfd_reloc_status_type bfd_elf_generic_reloc (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc_entry, asymbol *symbol, void *data ATTRIBUTE_UNUSED, asection *input_section, bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED) { if (output_bfd != NULL && (symbol->flags & BSF_SECTION_SYM) == 0 && (! reloc_entry->howto->partial_inplace || reloc_entry->addend == 0)) { reloc_entry->address += input_section->output_offset; return bfd_reloc_ok; } return bfd_reloc_continue; } /* Make sure sec_info_type is cleared if sec_info is cleared too. */ static void merge_sections_remove_hook (bfd *abfd ATTRIBUTE_UNUSED, asection *sec) { BFD_ASSERT (sec->sec_info_type == ELF_INFO_TYPE_MERGE); sec->sec_info_type = ELF_INFO_TYPE_NONE; } /* Finish SHF_MERGE section merging. */ bfd_boolean _bfd_elf_merge_sections (bfd *abfd, struct bfd_link_info *info) { bfd *ibfd; asection *sec; if (!is_elf_hash_table (info->hash)) return FALSE; for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) if ((ibfd->flags & DYNAMIC) == 0) for (sec = ibfd->sections; sec != NULL; sec = sec->next) if ((sec->flags & SEC_MERGE) != 0 && !bfd_is_abs_section (sec->output_section)) { struct bfd_elf_section_data *secdata; secdata = elf_section_data (sec); if (! _bfd_add_merge_section (abfd, &elf_hash_table (info)->merge_info, sec, &secdata->sec_info)) return FALSE; else if (secdata->sec_info) sec->sec_info_type = ELF_INFO_TYPE_MERGE; } if (elf_hash_table (info)->merge_info != NULL) _bfd_merge_sections (abfd, info, elf_hash_table (info)->merge_info, merge_sections_remove_hook); return TRUE; } void _bfd_elf_link_just_syms (asection *sec, struct bfd_link_info *info) { sec->output_section = bfd_abs_section_ptr; sec->output_offset = sec->vma; if (!is_elf_hash_table (info->hash)) return; sec->sec_info_type = ELF_INFO_TYPE_JUST_SYMS; } /* Copy the program header and other data from one object module to another. */ bfd_boolean _bfd_elf_copy_private_bfd_data (bfd *ibfd, bfd *obfd) { if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour || bfd_get_flavour (obfd) != bfd_target_elf_flavour) return TRUE; BFD_ASSERT (!elf_flags_init (obfd) || (elf_elfheader (obfd)->e_flags == elf_elfheader (ibfd)->e_flags)); elf_gp (obfd) = elf_gp (ibfd); elf_elfheader (obfd)->e_flags = elf_elfheader (ibfd)->e_flags; elf_flags_init (obfd) = TRUE; /* Copy object attributes. */ _bfd_elf_copy_obj_attributes (ibfd, obfd); return TRUE; } static const char * get_segment_type (unsigned int p_type) { const char *pt; switch (p_type) { case PT_NULL: pt = "NULL"; break; case PT_LOAD: pt = "LOAD"; break; case PT_DYNAMIC: pt = "DYNAMIC"; break; case PT_INTERP: pt = "INTERP"; break; case PT_NOTE: pt = "NOTE"; break; case PT_SHLIB: pt = "SHLIB"; break; case PT_PHDR: pt = "PHDR"; break; case PT_TLS: pt = "TLS"; break; case PT_GNU_EH_FRAME: pt = "EH_FRAME"; break; case PT_GNU_STACK: pt = "STACK"; break; case PT_GNU_RELRO: pt = "RELRO"; break; default: pt = NULL; break; } return pt; } /* Print out the program headers. 
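   Also dump the dynamic section contents and any version definitions and
   version references found in the file.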
*/ bfd_boolean _bfd_elf_print_private_bfd_data (bfd *abfd, void *farg) { FILE *f = farg; Elf_Internal_Phdr *p; asection *s; bfd_byte *dynbuf = NULL; p = elf_tdata (abfd)->phdr; if (p != NULL) { unsigned int i, c; fprintf (f, _("\nProgram Header:\n")); c = elf_elfheader (abfd)->e_phnum; for (i = 0; i < c; i++, p++) { const char *pt = get_segment_type (p->p_type); char buf[20]; if (pt == NULL) { sprintf (buf, "0x%lx", p->p_type); pt = buf; } fprintf (f, "%8s off 0x", pt); bfd_fprintf_vma (abfd, f, p->p_offset); fprintf (f, " vaddr 0x"); bfd_fprintf_vma (abfd, f, p->p_vaddr); fprintf (f, " paddr 0x"); bfd_fprintf_vma (abfd, f, p->p_paddr); fprintf (f, " align 2**%u\n", bfd_log2 (p->p_align)); fprintf (f, " filesz 0x"); bfd_fprintf_vma (abfd, f, p->p_filesz); fprintf (f, " memsz 0x"); bfd_fprintf_vma (abfd, f, p->p_memsz); fprintf (f, " flags %c%c%c", (p->p_flags & PF_R) != 0 ? 'r' : '-', (p->p_flags & PF_W) != 0 ? 'w' : '-', (p->p_flags & PF_X) != 0 ? 'x' : '-'); if ((p->p_flags &~ (unsigned) (PF_R | PF_W | PF_X)) != 0) fprintf (f, " %lx", p->p_flags &~ (unsigned) (PF_R | PF_W | PF_X)); fprintf (f, "\n"); } } s = bfd_get_section_by_name (abfd, ".dynamic"); if (s != NULL) { int elfsec; unsigned long shlink; bfd_byte *extdyn, *extdynend; size_t extdynsize; void (*swap_dyn_in) (bfd *, const void *, Elf_Internal_Dyn *); fprintf (f, _("\nDynamic Section:\n")); if (!bfd_malloc_and_get_section (abfd, s, &dynbuf)) goto error_return; elfsec = _bfd_elf_section_from_bfd_section (abfd, s); if (elfsec == -1) goto error_return; shlink = elf_elfsections (abfd)[elfsec]->sh_link; extdynsize = get_elf_backend_data (abfd)->s->sizeof_dyn; swap_dyn_in = get_elf_backend_data (abfd)->s->swap_dyn_in; extdyn = dynbuf; extdynend = extdyn + s->size; for (; extdyn < extdynend; extdyn += extdynsize) { Elf_Internal_Dyn dyn; const char *name; char ab[20]; bfd_boolean stringp; (*swap_dyn_in) (abfd, extdyn, &dyn); if (dyn.d_tag == DT_NULL) break; stringp = FALSE; switch (dyn.d_tag) { default: sprintf (ab, "0x%lx", (unsigned long) dyn.d_tag); name = ab; break; case DT_NEEDED: name = "NEEDED"; stringp = TRUE; break; case DT_PLTRELSZ: name = "PLTRELSZ"; break; case DT_PLTGOT: name = "PLTGOT"; break; case DT_HASH: name = "HASH"; break; case DT_STRTAB: name = "STRTAB"; break; case DT_SYMTAB: name = "SYMTAB"; break; case DT_RELA: name = "RELA"; break; case DT_RELASZ: name = "RELASZ"; break; case DT_RELAENT: name = "RELAENT"; break; case DT_STRSZ: name = "STRSZ"; break; case DT_SYMENT: name = "SYMENT"; break; case DT_INIT: name = "INIT"; break; case DT_FINI: name = "FINI"; break; case DT_SONAME: name = "SONAME"; stringp = TRUE; break; case DT_RPATH: name = "RPATH"; stringp = TRUE; break; case DT_SYMBOLIC: name = "SYMBOLIC"; break; case DT_REL: name = "REL"; break; case DT_RELSZ: name = "RELSZ"; break; case DT_RELENT: name = "RELENT"; break; case DT_PLTREL: name = "PLTREL"; break; case DT_DEBUG: name = "DEBUG"; break; case DT_TEXTREL: name = "TEXTREL"; break; case DT_JMPREL: name = "JMPREL"; break; case DT_BIND_NOW: name = "BIND_NOW"; break; case DT_INIT_ARRAY: name = "INIT_ARRAY"; break; case DT_FINI_ARRAY: name = "FINI_ARRAY"; break; case DT_INIT_ARRAYSZ: name = "INIT_ARRAYSZ"; break; case DT_FINI_ARRAYSZ: name = "FINI_ARRAYSZ"; break; case DT_RUNPATH: name = "RUNPATH"; stringp = TRUE; break; case DT_FLAGS: name = "FLAGS"; break; case DT_PREINIT_ARRAY: name = "PREINIT_ARRAY"; break; case DT_PREINIT_ARRAYSZ: name = "PREINIT_ARRAYSZ"; break; case DT_CHECKSUM: name = "CHECKSUM"; break; case DT_PLTPADSZ: name = "PLTPADSZ"; break; case 
DT_MOVEENT: name = "MOVEENT"; break; case DT_MOVESZ: name = "MOVESZ"; break; case DT_FEATURE: name = "FEATURE"; break; case DT_POSFLAG_1: name = "POSFLAG_1"; break; case DT_SYMINSZ: name = "SYMINSZ"; break; case DT_SYMINENT: name = "SYMINENT"; break; case DT_CONFIG: name = "CONFIG"; stringp = TRUE; break; case DT_DEPAUDIT: name = "DEPAUDIT"; stringp = TRUE; break; case DT_AUDIT: name = "AUDIT"; stringp = TRUE; break; case DT_PLTPAD: name = "PLTPAD"; break; case DT_MOVETAB: name = "MOVETAB"; break; case DT_SYMINFO: name = "SYMINFO"; break; case DT_RELACOUNT: name = "RELACOUNT"; break; case DT_RELCOUNT: name = "RELCOUNT"; break; case DT_FLAGS_1: name = "FLAGS_1"; break; case DT_VERSYM: name = "VERSYM"; break; case DT_VERDEF: name = "VERDEF"; break; case DT_VERDEFNUM: name = "VERDEFNUM"; break; case DT_VERNEED: name = "VERNEED"; break; case DT_VERNEEDNUM: name = "VERNEEDNUM"; break; case DT_AUXILIARY: name = "AUXILIARY"; stringp = TRUE; break; case DT_USED: name = "USED"; break; case DT_FILTER: name = "FILTER"; stringp = TRUE; break; case DT_GNU_HASH: name = "GNU_HASH"; break; } fprintf (f, " %-11s ", name); if (! stringp) fprintf (f, "0x%lx", (unsigned long) dyn.d_un.d_val); else { const char *string; unsigned int tagv = dyn.d_un.d_val; string = bfd_elf_string_from_elf_section (abfd, shlink, tagv); if (string == NULL) goto error_return; fprintf (f, "%s", string); } fprintf (f, "\n"); } free (dynbuf); dynbuf = NULL; } if ((elf_dynverdef (abfd) != 0 && elf_tdata (abfd)->verdef == NULL) || (elf_dynverref (abfd) != 0 && elf_tdata (abfd)->verref == NULL)) { if (! _bfd_elf_slurp_version_tables (abfd, FALSE)) return FALSE; } if (elf_dynverdef (abfd) != 0) { Elf_Internal_Verdef *t; fprintf (f, _("\nVersion definitions:\n")); for (t = elf_tdata (abfd)->verdef; t != NULL; t = t->vd_nextdef) { fprintf (f, "%d 0x%2.2x 0x%8.8lx %s\n", t->vd_ndx, t->vd_flags, t->vd_hash, t->vd_nodename ? t->vd_nodename : ""); if (t->vd_auxptr != NULL && t->vd_auxptr->vda_nextptr != NULL) { Elf_Internal_Verdaux *a; fprintf (f, "\t"); for (a = t->vd_auxptr->vda_nextptr; a != NULL; a = a->vda_nextptr) fprintf (f, "%s ", a->vda_nodename ? a->vda_nodename : ""); fprintf (f, "\n"); } } } if (elf_dynverref (abfd) != 0) { Elf_Internal_Verneed *t; fprintf (f, _("\nVersion References:\n")); for (t = elf_tdata (abfd)->verref; t != NULL; t = t->vn_nextref) { Elf_Internal_Vernaux *a; fprintf (f, _(" required from %s:\n"), t->vn_filename ? t->vn_filename : ""); for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr) fprintf (f, " 0x%8.8lx 0x%2.2x %2.2d %s\n", a->vna_hash, a->vna_flags, a->vna_other, a->vna_nodename ? a->vna_nodename : ""); } } return TRUE; error_return: if (dynbuf != NULL) free (dynbuf); return FALSE; } /* Display ELF-specific fields of a symbol. */ void bfd_elf_print_symbol (bfd *abfd, void *filep, asymbol *symbol, bfd_print_symbol_type how) { FILE *file = filep; switch (how) { case bfd_print_symbol_name: fprintf (file, "%s", symbol->name); break; case bfd_print_symbol_more: fprintf (file, "elf "); bfd_fprintf_vma (abfd, file, symbol->value); fprintf (file, " %lx", (long) symbol->flags); break; case bfd_print_symbol_all: { const char *section_name; const char *name = NULL; const struct elf_backend_data *bed; unsigned char st_other; bfd_vma val; section_name = symbol->section ? 
symbol->section->name : "(*none*)"; bed = get_elf_backend_data (abfd); if (bed->elf_backend_print_symbol_all) name = (*bed->elf_backend_print_symbol_all) (abfd, filep, symbol); if (name == NULL) { name = symbol->name; bfd_print_symbol_vandf (abfd, file, symbol); } fprintf (file, " %s\t", section_name); /* Print the "other" value for a symbol. For common symbols, we've already printed the size; now print the alignment. For other symbols, we have no specified alignment, and we've printed the address; now print the size. */ if (bfd_is_com_section (symbol->section)) val = ((elf_symbol_type *) symbol)->internal_elf_sym.st_value; else val = ((elf_symbol_type *) symbol)->internal_elf_sym.st_size; bfd_fprintf_vma (abfd, file, val); /* If we have version information, print it. */ if (elf_tdata (abfd)->dynversym_section != 0 && (elf_tdata (abfd)->dynverdef_section != 0 || elf_tdata (abfd)->dynverref_section != 0)) { unsigned int vernum; const char *version_string; vernum = ((elf_symbol_type *) symbol)->version & VERSYM_VERSION; if (vernum == 0) version_string = ""; else if (vernum == 1) version_string = "Base"; else if (vernum <= elf_tdata (abfd)->cverdefs) version_string = elf_tdata (abfd)->verdef[vernum - 1].vd_nodename; else { Elf_Internal_Verneed *t; version_string = ""; for (t = elf_tdata (abfd)->verref; t != NULL; t = t->vn_nextref) { Elf_Internal_Vernaux *a; for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr) { if (a->vna_other == vernum) { version_string = a->vna_nodename; break; } } } } if ((((elf_symbol_type *) symbol)->version & VERSYM_HIDDEN) == 0) fprintf (file, " %-11s", version_string); else { int i; fprintf (file, " (%s)", version_string); for (i = 10 - strlen (version_string); i > 0; --i) putc (' ', file); } } /* If the st_other field is not zero, print it. */ st_other = ((elf_symbol_type *) symbol)->internal_elf_sym.st_other; switch (st_other) { case 0: break; case STV_INTERNAL: fprintf (file, " .internal"); break; case STV_HIDDEN: fprintf (file, " .hidden"); break; case STV_PROTECTED: fprintf (file, " .protected"); break; default: /* Some other non-defined flags are also present, so print everything hex. */ fprintf (file, " 0x%02x", (unsigned int) st_other); } fprintf (file, " %s", name); } break; } } /* Create an entry in an ELF linker hash table. */ struct bfd_hash_entry * _bfd_elf_link_hash_newfunc (struct bfd_hash_entry *entry, struct bfd_hash_table *table, const char *string) { /* Allocate the structure if it has not already been allocated by a subclass. */ if (entry == NULL) { entry = bfd_hash_allocate (table, sizeof (struct elf_link_hash_entry)); if (entry == NULL) return entry; } /* Call the allocation method of the superclass. */ entry = _bfd_link_hash_newfunc (entry, table, string); if (entry != NULL) { struct elf_link_hash_entry *ret = (struct elf_link_hash_entry *) entry; struct elf_link_hash_table *htab = (struct elf_link_hash_table *) table; /* Set local fields. */ ret->indx = -1; ret->dynindx = -1; ret->got = htab->init_got_refcount; ret->plt = htab->init_plt_refcount; memset (&ret->size, 0, (sizeof (struct elf_link_hash_entry) - offsetof (struct elf_link_hash_entry, size))); /* Assume that we have been called by a non-ELF symbol reader. This flag is then reset by the code which reads an ELF input file. This ensures that a symbol created by a non-ELF symbol reader will have the flag set correctly. */ ret->non_elf = 1; } return entry; } /* Copy data from an indirect symbol to its direct symbol, hiding the old indirect symbol. 
Also used for copying flags to a weakdef. */ void _bfd_elf_link_hash_copy_indirect (struct bfd_link_info *info, struct elf_link_hash_entry *dir, struct elf_link_hash_entry *ind) { struct elf_link_hash_table *htab; /* Copy down any references that we may have already seen to the symbol which just became indirect. */ dir->ref_dynamic |= ind->ref_dynamic; dir->ref_regular |= ind->ref_regular; dir->ref_regular_nonweak |= ind->ref_regular_nonweak; dir->non_got_ref |= ind->non_got_ref; dir->needs_plt |= ind->needs_plt; dir->pointer_equality_needed |= ind->pointer_equality_needed; if (ind->root.type != bfd_link_hash_indirect) return; /* Copy over the global and procedure linkage table refcount entries. These may have been already set up by a check_relocs routine. */ htab = elf_hash_table (info); if (ind->got.refcount > htab->init_got_refcount.refcount) { if (dir->got.refcount < 0) dir->got.refcount = 0; dir->got.refcount += ind->got.refcount; ind->got.refcount = htab->init_got_refcount.refcount; } if (ind->plt.refcount > htab->init_plt_refcount.refcount) { if (dir->plt.refcount < 0) dir->plt.refcount = 0; dir->plt.refcount += ind->plt.refcount; ind->plt.refcount = htab->init_plt_refcount.refcount; } if (ind->dynindx != -1) { if (dir->dynindx != -1) _bfd_elf_strtab_delref (htab->dynstr, dir->dynstr_index); dir->dynindx = ind->dynindx; dir->dynstr_index = ind->dynstr_index; ind->dynindx = -1; ind->dynstr_index = 0; } } void _bfd_elf_link_hash_hide_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *h, bfd_boolean force_local) { h->plt = elf_hash_table (info)->init_plt_offset; h->needs_plt = 0; if (force_local) { h->forced_local = 1; if (h->dynindx != -1) { h->dynindx = -1; _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr, h->dynstr_index); } } } /* Initialize an ELF linker hash table. */ bfd_boolean _bfd_elf_link_hash_table_init (struct elf_link_hash_table *table, bfd *abfd, struct bfd_hash_entry *(*newfunc) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *), unsigned int entsize) { bfd_boolean ret; int can_refcount = get_elf_backend_data (abfd)->can_refcount; memset (table, 0, sizeof * table); table->init_got_refcount.refcount = can_refcount - 1; table->init_plt_refcount.refcount = can_refcount - 1; table->init_got_offset.offset = -(bfd_vma) 1; table->init_plt_offset.offset = -(bfd_vma) 1; /* The first dynamic symbol is a dummy. */ table->dynsymcount = 1; ret = _bfd_link_hash_table_init (&table->root, abfd, newfunc, entsize); table->root.type = bfd_link_elf_hash_table; return ret; } /* Create an ELF linker hash table. */ struct bfd_link_hash_table * _bfd_elf_link_hash_table_create (bfd *abfd) { struct elf_link_hash_table *ret; bfd_size_type amt = sizeof (struct elf_link_hash_table); ret = bfd_malloc (amt); if (ret == NULL) return NULL; if (! _bfd_elf_link_hash_table_init (ret, abfd, _bfd_elf_link_hash_newfunc, sizeof (struct elf_link_hash_entry))) { free (ret); return NULL; } return &ret->root; } /* This is a hook for the ELF emulation code in the generic linker to tell the backend linker what file name to use for the DT_NEEDED entry for a dynamic object. 
*/ void bfd_elf_set_dt_needed_name (bfd *abfd, const char *name) { if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && bfd_get_format (abfd) == bfd_object) elf_dt_name (abfd) = name; } int bfd_elf_get_dyn_lib_class (bfd *abfd) { int lib_class; if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && bfd_get_format (abfd) == bfd_object) lib_class = elf_dyn_lib_class (abfd); else lib_class = 0; return lib_class; } void bfd_elf_set_dyn_lib_class (bfd *abfd, enum dynamic_lib_link_class lib_class) { if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && bfd_get_format (abfd) == bfd_object) elf_dyn_lib_class (abfd) = lib_class; } /* Get the list of DT_NEEDED entries for a link. This is a hook for the linker ELF emulation code. */ struct bfd_link_needed_list * bfd_elf_get_needed_list (bfd *abfd ATTRIBUTE_UNUSED, struct bfd_link_info *info) { if (! is_elf_hash_table (info->hash)) return NULL; return elf_hash_table (info)->needed; } /* Get the list of DT_RPATH/DT_RUNPATH entries for a link. This is a hook for the linker ELF emulation code. */ struct bfd_link_needed_list * bfd_elf_get_runpath_list (bfd *abfd ATTRIBUTE_UNUSED, struct bfd_link_info *info) { if (! is_elf_hash_table (info->hash)) return NULL; return elf_hash_table (info)->runpath; } /* Get the name actually used for a dynamic object for a link. This is the SONAME entry if there is one. Otherwise, it is the string passed to bfd_elf_set_dt_needed_name, or it is the filename. */ const char * bfd_elf_get_dt_soname (bfd *abfd) { if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && bfd_get_format (abfd) == bfd_object) return elf_dt_name (abfd); return NULL; } /* Get the list of DT_NEEDED entries from a BFD. This is a hook for the ELF linker emulation code. */ bfd_boolean bfd_elf_get_bfd_needed_list (bfd *abfd, struct bfd_link_needed_list **pneeded) { asection *s; bfd_byte *dynbuf = NULL; int elfsec; unsigned long shlink; bfd_byte *extdyn, *extdynend; size_t extdynsize; void (*swap_dyn_in) (bfd *, const void *, Elf_Internal_Dyn *); *pneeded = NULL; if (bfd_get_flavour (abfd) != bfd_target_elf_flavour || bfd_get_format (abfd) != bfd_object) return TRUE; s = bfd_get_section_by_name (abfd, ".dynamic"); if (s == NULL || s->size == 0) return TRUE; if (!bfd_malloc_and_get_section (abfd, s, &dynbuf)) goto error_return; elfsec = _bfd_elf_section_from_bfd_section (abfd, s); if (elfsec == -1) goto error_return; shlink = elf_elfsections (abfd)[elfsec]->sh_link; extdynsize = get_elf_backend_data (abfd)->s->sizeof_dyn; swap_dyn_in = get_elf_backend_data (abfd)->s->swap_dyn_in; extdyn = dynbuf; extdynend = extdyn + s->size; for (; extdyn < extdynend; extdyn += extdynsize) { Elf_Internal_Dyn dyn; (*swap_dyn_in) (abfd, extdyn, &dyn); if (dyn.d_tag == DT_NULL) break; if (dyn.d_tag == DT_NEEDED) { const char *string; struct bfd_link_needed_list *l; unsigned int tagv = dyn.d_un.d_val; bfd_size_type amt; string = bfd_elf_string_from_elf_section (abfd, shlink, tagv); if (string == NULL) goto error_return; amt = sizeof *l; l = bfd_alloc (abfd, amt); if (l == NULL) goto error_return; l->by = abfd; l->name = string; l->next = *pneeded; *pneeded = l; } } free (dynbuf); return TRUE; error_return: if (dynbuf != NULL) free (dynbuf); return FALSE; } /* Allocate an ELF string table--force the first byte to be zero. 
*/ struct bfd_strtab_hash * _bfd_elf_stringtab_init (void) { struct bfd_strtab_hash *ret; ret = _bfd_stringtab_init (); if (ret != NULL) { bfd_size_type loc; loc = _bfd_stringtab_add (ret, "", TRUE, FALSE); BFD_ASSERT (loc == 0 || loc == (bfd_size_type) -1); if (loc == (bfd_size_type) -1) { _bfd_stringtab_free (ret); ret = NULL; } } return ret; } /* ELF .o/exec file reading */ /* Create a new bfd section from an ELF section header. */ bfd_boolean bfd_section_from_shdr (bfd *abfd, unsigned int shindex) { Elf_Internal_Shdr *hdr = elf_elfsections (abfd)[shindex]; Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd); const struct elf_backend_data *bed = get_elf_backend_data (abfd); const char *name; name = bfd_elf_string_from_elf_section (abfd, elf_elfheader (abfd)->e_shstrndx, hdr->sh_name); if (name == NULL) return FALSE; switch (hdr->sh_type) { case SHT_NULL: /* Inactive section. Throw it away. */ return TRUE; case SHT_PROGBITS: /* Normal section with contents. */ case SHT_NOBITS: /* .bss section. */ case SHT_HASH: /* .hash section. */ case SHT_NOTE: /* .note section. */ case SHT_INIT_ARRAY: /* .init_array section. */ case SHT_FINI_ARRAY: /* .fini_array section. */ case SHT_PREINIT_ARRAY: /* .preinit_array section. */ case SHT_GNU_LIBLIST: /* .gnu.liblist section. */ case SHT_GNU_HASH: /* .gnu.hash section. */ return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); case SHT_DYNAMIC: /* Dynamic linking information. */ if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) return FALSE; if (hdr->sh_link > elf_numsections (abfd) || elf_elfsections (abfd)[hdr->sh_link] == NULL) return FALSE; if (elf_elfsections (abfd)[hdr->sh_link]->sh_type != SHT_STRTAB) { Elf_Internal_Shdr *dynsymhdr; /* The shared libraries distributed with hpux11 have a bogus sh_link field for the ".dynamic" section. Find the string table for the ".dynsym" section instead. */ if (elf_dynsymtab (abfd) != 0) { dynsymhdr = elf_elfsections (abfd)[elf_dynsymtab (abfd)]; hdr->sh_link = dynsymhdr->sh_link; } else { unsigned int i, num_sec; num_sec = elf_numsections (abfd); for (i = 1; i < num_sec; i++) { dynsymhdr = elf_elfsections (abfd)[i]; if (dynsymhdr->sh_type == SHT_DYNSYM) { hdr->sh_link = dynsymhdr->sh_link; break; } } } } break; case SHT_SYMTAB: /* A symbol table */ if (elf_onesymtab (abfd) == shindex) return TRUE; if (hdr->sh_entsize != bed->s->sizeof_sym) return FALSE; BFD_ASSERT (elf_onesymtab (abfd) == 0); elf_onesymtab (abfd) = shindex; elf_tdata (abfd)->symtab_hdr = *hdr; elf_elfsections (abfd)[shindex] = hdr = &elf_tdata (abfd)->symtab_hdr; abfd->flags |= HAS_SYMS; /* Sometimes a shared object will map in the symbol table. If SHF_ALLOC is set, and this is a shared object, then we also treat this section as a BFD section. We can not base the decision purely on SHF_ALLOC, because that flag is sometimes set in a relocatable object file, which would confuse the linker. */ if ((hdr->sh_flags & SHF_ALLOC) != 0 && (abfd->flags & DYNAMIC) != 0 && ! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) return FALSE; /* Go looking for SHT_SYMTAB_SHNDX too, since if there is one we can't read symbols without that section loaded as well. It is most likely specified by the next section header. 
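   If no such header follows the symbol table, the headers before it are
   searched as well.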
*/ if (elf_elfsections (abfd)[elf_symtab_shndx (abfd)]->sh_link != shindex) { unsigned int i, num_sec; num_sec = elf_numsections (abfd); for (i = shindex + 1; i < num_sec; i++) { Elf_Internal_Shdr *hdr2 = elf_elfsections (abfd)[i]; if (hdr2->sh_type == SHT_SYMTAB_SHNDX && hdr2->sh_link == shindex) break; } if (i == num_sec) for (i = 1; i < shindex; i++) { Elf_Internal_Shdr *hdr2 = elf_elfsections (abfd)[i]; if (hdr2->sh_type == SHT_SYMTAB_SHNDX && hdr2->sh_link == shindex) break; } if (i != shindex) return bfd_section_from_shdr (abfd, i); } return TRUE; case SHT_DYNSYM: /* A dynamic symbol table */ if (elf_dynsymtab (abfd) == shindex) return TRUE; if (hdr->sh_entsize != bed->s->sizeof_sym) return FALSE; BFD_ASSERT (elf_dynsymtab (abfd) == 0); elf_dynsymtab (abfd) = shindex; elf_tdata (abfd)->dynsymtab_hdr = *hdr; elf_elfsections (abfd)[shindex] = hdr = &elf_tdata (abfd)->dynsymtab_hdr; abfd->flags |= HAS_SYMS; /* Besides being a symbol table, we also treat this as a regular section, so that objcopy can handle it. */ return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); case SHT_SYMTAB_SHNDX: /* Symbol section indices when >64k sections */ if (elf_symtab_shndx (abfd) == shindex) return TRUE; BFD_ASSERT (elf_symtab_shndx (abfd) == 0); elf_symtab_shndx (abfd) = shindex; elf_tdata (abfd)->symtab_shndx_hdr = *hdr; elf_elfsections (abfd)[shindex] = &elf_tdata (abfd)->symtab_shndx_hdr; return TRUE; case SHT_STRTAB: /* A string table */ if (hdr->bfd_section != NULL) return TRUE; if (ehdr->e_shstrndx == shindex) { elf_tdata (abfd)->shstrtab_hdr = *hdr; elf_elfsections (abfd)[shindex] = &elf_tdata (abfd)->shstrtab_hdr; return TRUE; } if (elf_elfsections (abfd)[elf_onesymtab (abfd)]->sh_link == shindex) { symtab_strtab: elf_tdata (abfd)->strtab_hdr = *hdr; elf_elfsections (abfd)[shindex] = &elf_tdata (abfd)->strtab_hdr; return TRUE; } if (elf_elfsections (abfd)[elf_dynsymtab (abfd)]->sh_link == shindex) { dynsymtab_strtab: elf_tdata (abfd)->dynstrtab_hdr = *hdr; hdr = &elf_tdata (abfd)->dynstrtab_hdr; elf_elfsections (abfd)[shindex] = hdr; /* We also treat this as a regular section, so that objcopy can handle it. */ return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); } /* If the string table isn't one of the above, then treat it as a regular section. We need to scan all the headers to be sure, just in case this strtab section appeared before the above. */ if (elf_onesymtab (abfd) == 0 || elf_dynsymtab (abfd) == 0) { unsigned int i, num_sec; num_sec = elf_numsections (abfd); for (i = 1; i < num_sec; i++) { Elf_Internal_Shdr *hdr2 = elf_elfsections (abfd)[i]; if (hdr2->sh_link == shindex) { /* Prevent endless recursion on broken objects. */ if (i == shindex) return FALSE; if (! bfd_section_from_shdr (abfd, i)) return FALSE; if (elf_onesymtab (abfd) == i) goto symtab_strtab; if (elf_dynsymtab (abfd) == i) goto dynsymtab_strtab; } } } return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); case SHT_REL: case SHT_RELA: /* *These* do a lot of work -- but build no sections! */ { asection *target_sect; Elf_Internal_Shdr *hdr2; unsigned int num_sec = elf_numsections (abfd); if (hdr->sh_entsize != (bfd_size_type) (hdr->sh_type == SHT_REL ? bed->s->sizeof_rel : bed->s->sizeof_rela)) return FALSE; /* Check for a bogus link to avoid crashing. 
*/ if ((hdr->sh_link >= SHN_LORESERVE && hdr->sh_link <= SHN_HIRESERVE) || hdr->sh_link >= num_sec) { ((*_bfd_error_handler) (_("%B: invalid link %lu for reloc section %s (index %u)"), abfd, hdr->sh_link, name, shindex)); return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); } /* For some incomprehensible reason Oracle distributes libraries for Solaris in which some of the objects have bogus sh_link fields. It would be nice if we could just reject them, but, unfortunately, some people need to use them. We scan through the section headers; if we find only one suitable symbol table, we clobber the sh_link to point to it. I hope this doesn't break anything. */ if (elf_elfsections (abfd)[hdr->sh_link]->sh_type != SHT_SYMTAB && elf_elfsections (abfd)[hdr->sh_link]->sh_type != SHT_DYNSYM) { unsigned int scan; int found; found = 0; for (scan = 1; scan < num_sec; scan++) { if (elf_elfsections (abfd)[scan]->sh_type == SHT_SYMTAB || elf_elfsections (abfd)[scan]->sh_type == SHT_DYNSYM) { if (found != 0) { found = 0; break; } found = scan; } } if (found != 0) hdr->sh_link = found; } /* Get the symbol table. */ if ((elf_elfsections (abfd)[hdr->sh_link]->sh_type == SHT_SYMTAB || elf_elfsections (abfd)[hdr->sh_link]->sh_type == SHT_DYNSYM) && ! bfd_section_from_shdr (abfd, hdr->sh_link)) return FALSE; /* If this reloc section does not use the main symbol table we don't treat it as a reloc section. BFD can't adequately represent such a section, so at least for now, we don't try. We just present it as a normal section. We also can't use it as a reloc section if it points to the null section, an invalid section, or another reloc section. */ if (hdr->sh_link != elf_onesymtab (abfd) || hdr->sh_info == SHN_UNDEF || (hdr->sh_info >= SHN_LORESERVE && hdr->sh_info <= SHN_HIRESERVE) || hdr->sh_info >= num_sec || elf_elfsections (abfd)[hdr->sh_info]->sh_type == SHT_REL || elf_elfsections (abfd)[hdr->sh_info]->sh_type == SHT_RELA) return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); if (! bfd_section_from_shdr (abfd, hdr->sh_info)) return FALSE; target_sect = bfd_section_from_elf_index (abfd, hdr->sh_info); if (target_sect == NULL) return FALSE; if ((target_sect->flags & SEC_RELOC) == 0 || target_sect->reloc_count == 0) hdr2 = &elf_section_data (target_sect)->rel_hdr; else { bfd_size_type amt; BFD_ASSERT (elf_section_data (target_sect)->rel_hdr2 == NULL); amt = sizeof (*hdr2); hdr2 = bfd_alloc (abfd, amt); elf_section_data (target_sect)->rel_hdr2 = hdr2; } *hdr2 = *hdr; elf_elfsections (abfd)[shindex] = hdr2; target_sect->reloc_count += NUM_SHDR_ENTRIES (hdr); target_sect->flags |= SEC_RELOC; target_sect->relocation = NULL; target_sect->rel_filepos = hdr->sh_offset; /* In the section to which the relocations apply, mark whether its relocations are of the REL or RELA variety. 
*/ if (hdr->sh_size != 0) target_sect->use_rela_p = hdr->sh_type == SHT_RELA; abfd->flags |= HAS_RELOC; return TRUE; } case SHT_GNU_verdef: elf_dynverdef (abfd) = shindex; elf_tdata (abfd)->dynverdef_hdr = *hdr; return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); case SHT_GNU_versym: if (hdr->sh_entsize != sizeof (Elf_External_Versym)) return FALSE; elf_dynversym (abfd) = shindex; elf_tdata (abfd)->dynversym_hdr = *hdr; return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); case SHT_GNU_verneed: elf_dynverref (abfd) = shindex; elf_tdata (abfd)->dynverref_hdr = *hdr; return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); case SHT_SHLIB: return TRUE; case SHT_GROUP: /* We need a BFD section for objcopy and relocatable linking, and it's handy to have the signature available as the section name. */ if (! IS_VALID_GROUP_SECTION_HEADER (hdr)) return FALSE; name = group_signature (abfd, hdr); if (name == NULL) return FALSE; if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) return FALSE; if (hdr->contents != NULL) { Elf_Internal_Group *idx = (Elf_Internal_Group *) hdr->contents; unsigned int n_elt = hdr->sh_size / GRP_ENTRY_SIZE; asection *s; if (idx->flags & GRP_COMDAT) hdr->bfd_section->flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_DISCARD; /* We try to keep the same section order as it comes in. */ idx += n_elt; while (--n_elt != 0) { --idx; if (idx->shdr != NULL && (s = idx->shdr->bfd_section) != NULL && elf_next_in_group (s) != NULL) { elf_next_in_group (hdr->bfd_section) = s; break; } } } break; default: /* Possibly an attributes section. */ if (hdr->sh_type == SHT_GNU_ATTRIBUTES || hdr->sh_type == bed->obj_attrs_section_type) { if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) return FALSE; _bfd_elf_parse_attributes (abfd, hdr); return TRUE; } /* Check for any processor-specific section types. */ if (bed->elf_backend_section_from_shdr (abfd, hdr, name, shindex)) return TRUE; if (hdr->sh_type >= SHT_LOUSER && hdr->sh_type <= SHT_HIUSER) { if ((hdr->sh_flags & SHF_ALLOC) != 0) /* FIXME: How to properly handle allocated section reserved for applications? */ (*_bfd_error_handler) (_("%B: don't know how to handle allocated, application " "specific section `%s' [0x%8x]"), abfd, name, hdr->sh_type); else /* Allow sections reserved for applications. */ return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); } else if (hdr->sh_type >= SHT_LOPROC && hdr->sh_type <= SHT_HIPROC) /* FIXME: We should handle this section. */ (*_bfd_error_handler) (_("%B: don't know how to handle processor specific section " "`%s' [0x%8x]"), abfd, name, hdr->sh_type); else if (hdr->sh_type >= SHT_LOOS && hdr->sh_type <= SHT_HIOS) { /* Unrecognised OS-specific sections. */ if ((hdr->sh_flags & SHF_OS_NONCONFORMING) != 0) /* SHF_OS_NONCONFORMING indicates that special knowledge is required to correctly process the section and the file should be rejected with an error message. */ (*_bfd_error_handler) (_("%B: don't know how to handle OS specific section " "`%s' [0x%8x]"), abfd, name, hdr->sh_type); else /* Otherwise it should be processed. */ return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex); } else /* FIXME: We should handle this section. */ (*_bfd_error_handler) (_("%B: don't know how to handle section `%s' [0x%8x]"), abfd, name, hdr->sh_type); return FALSE; } return TRUE; } /* Return the section for the local symbol specified by ABFD, R_SYMNDX. Return SEC for sections that have no elf section, and NULL on error. 
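   Results are cached per input bfd in CACHE, indexed by R_SYMNDX modulo
   LOCAL_SYM_CACHE_SIZE.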
*/ asection * bfd_section_from_r_symndx (bfd *abfd, struct sym_sec_cache *cache, asection *sec, unsigned long r_symndx) { Elf_Internal_Shdr *symtab_hdr; unsigned char esym[sizeof (Elf64_External_Sym)]; Elf_External_Sym_Shndx eshndx; Elf_Internal_Sym isym; unsigned int ent = r_symndx % LOCAL_SYM_CACHE_SIZE; if (cache->abfd == abfd && cache->indx[ent] == r_symndx) return cache->sec[ent]; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; if (bfd_elf_get_elf_syms (abfd, symtab_hdr, 1, r_symndx, &isym, esym, &eshndx) == NULL) return NULL; if (cache->abfd != abfd) { memset (cache->indx, -1, sizeof (cache->indx)); cache->abfd = abfd; } cache->indx[ent] = r_symndx; cache->sec[ent] = sec; if ((isym.st_shndx != SHN_UNDEF && isym.st_shndx < SHN_LORESERVE) || isym.st_shndx > SHN_HIRESERVE) { asection *s; s = bfd_section_from_elf_index (abfd, isym.st_shndx); if (s != NULL) cache->sec[ent] = s; } return cache->sec[ent]; } /* Given an ELF section number, retrieve the corresponding BFD section. */ asection * bfd_section_from_elf_index (bfd *abfd, unsigned int index) { if (index >= elf_numsections (abfd)) return NULL; return elf_elfsections (abfd)[index]->bfd_section; } static const struct bfd_elf_special_section special_sections_b[] = { { STRING_COMMA_LEN (".bss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_c[] = { { STRING_COMMA_LEN (".comment"), 0, SHT_PROGBITS, 0 }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_d[] = { { STRING_COMMA_LEN (".data"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE }, { STRING_COMMA_LEN (".data1"), 0, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE }, { STRING_COMMA_LEN (".debug"), 0, SHT_PROGBITS, 0 }, { STRING_COMMA_LEN (".debug_line"), 0, SHT_PROGBITS, 0 }, { STRING_COMMA_LEN (".debug_info"), 0, SHT_PROGBITS, 0 }, { STRING_COMMA_LEN (".debug_abbrev"), 0, SHT_PROGBITS, 0 }, { STRING_COMMA_LEN (".debug_aranges"), 0, SHT_PROGBITS, 0 }, { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, SHF_ALLOC }, { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, SHF_ALLOC }, { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, SHF_ALLOC }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_f[] = { { STRING_COMMA_LEN (".fini"), 0, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR }, { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC + SHF_WRITE }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_g[] = { { STRING_COMMA_LEN (".gnu.linkonce.b"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE }, { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE }, { STRING_COMMA_LEN (".gnu.version"), 0, SHT_GNU_versym, 0 }, { STRING_COMMA_LEN (".gnu.version_d"), 0, SHT_GNU_verdef, 0 }, { STRING_COMMA_LEN (".gnu.version_r"), 0, SHT_GNU_verneed, 0 }, { STRING_COMMA_LEN (".gnu.liblist"), 0, SHT_GNU_LIBLIST, SHF_ALLOC }, { STRING_COMMA_LEN (".gnu.conflict"), 0, SHT_RELA, SHF_ALLOC }, { STRING_COMMA_LEN (".gnu.hash"), 0, SHT_GNU_HASH, SHF_ALLOC }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_h[] = { { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, SHF_ALLOC }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_i[] = { { STRING_COMMA_LEN (".init"), 0, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR }, { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC + SHF_WRITE }, { STRING_COMMA_LEN (".interp"), 0, SHT_PROGBITS, 0 }, { NULL, 0, 0, 0, 0 } }; static const struct 
bfd_elf_special_section special_sections_l[] = { { STRING_COMMA_LEN (".line"), 0, SHT_PROGBITS, 0 }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_n[] = { { STRING_COMMA_LEN (".note.GNU-stack"), 0, SHT_PROGBITS, 0 }, { STRING_COMMA_LEN (".note"), -1, SHT_NOTE, 0 }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_p[] = { { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC + SHF_WRITE }, { STRING_COMMA_LEN (".plt"), 0, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_r[] = { { STRING_COMMA_LEN (".rodata"), -2, SHT_PROGBITS, SHF_ALLOC }, { STRING_COMMA_LEN (".rodata1"), 0, SHT_PROGBITS, SHF_ALLOC }, { STRING_COMMA_LEN (".rela"), -1, SHT_RELA, 0 }, { STRING_COMMA_LEN (".rel"), -1, SHT_REL, 0 }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_s[] = { { STRING_COMMA_LEN (".shstrtab"), 0, SHT_STRTAB, 0 }, { STRING_COMMA_LEN (".strtab"), 0, SHT_STRTAB, 0 }, { STRING_COMMA_LEN (".symtab"), 0, SHT_SYMTAB, 0 }, /* See struct bfd_elf_special_section declaration for the semantics of this special case where .prefix_length != strlen (.prefix). */ { ".stabstr", 5, 3, SHT_STRTAB, 0 }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section special_sections_t[] = { { STRING_COMMA_LEN (".text"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR }, { STRING_COMMA_LEN (".tbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_TLS }, { STRING_COMMA_LEN (".tdata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_TLS }, { NULL, 0, 0, 0, 0 } }; static const struct bfd_elf_special_section *special_sections[] = { special_sections_b, /* 'b' */ special_sections_c, /* 'b' */ special_sections_d, /* 'd' */ NULL, /* 'e' */ special_sections_f, /* 'f' */ special_sections_g, /* 'g' */ special_sections_h, /* 'h' */ special_sections_i, /* 'i' */ NULL, /* 'j' */ NULL, /* 'k' */ special_sections_l, /* 'l' */ NULL, /* 'm' */ special_sections_n, /* 'n' */ NULL, /* 'o' */ special_sections_p, /* 'p' */ NULL, /* 'q' */ special_sections_r, /* 'r' */ special_sections_s, /* 's' */ special_sections_t, /* 't' */ }; const struct bfd_elf_special_section * _bfd_elf_get_special_section (const char *name, const struct bfd_elf_special_section *spec, unsigned int rela) { int i; int len; len = strlen (name); for (i = 0; spec[i].prefix != NULL; i++) { int suffix_len; int prefix_len = spec[i].prefix_length; if (len < prefix_len) continue; if (memcmp (name, spec[i].prefix, prefix_len) != 0) continue; suffix_len = spec[i].suffix_length; if (suffix_len <= 0) { if (name[prefix_len] != 0) { if (suffix_len == 0) continue; if (name[prefix_len] != '.' && (suffix_len == -2 || (rela && spec[i].type == SHT_REL))) continue; } } else { if (len < prefix_len + suffix_len) continue; if (memcmp (name + len - suffix_len, spec[i].prefix + prefix_len, suffix_len) != 0) continue; } return &spec[i]; } return NULL; } const struct bfd_elf_special_section * _bfd_elf_get_sec_type_attr (bfd *abfd, asection *sec) { int i; const struct bfd_elf_special_section *spec; const struct elf_backend_data *bed; /* See if this is one of the special sections. 
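   For illustration, given the tables above: ".text.unlikely" matches the
   ".text" entry (suffix_length -2 allows an exact match or a trailing
   ".suffix"), ".rela.dyn" matches the ".rela" entry (suffix_length -1
   allows any tail), and ".stab.exclstr" matches the ".stabstr" entry,
   whose prefix_length 5 and suffix_length 3 mean "starts with .stab and
   ends with str".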
*/ if (sec->name == NULL) return NULL; bed = get_elf_backend_data (abfd); spec = bed->special_sections; if (spec) { spec = _bfd_elf_get_special_section (sec->name, bed->special_sections, sec->use_rela_p); if (spec != NULL) return spec; } if (sec->name[0] != '.') return NULL; i = sec->name[1] - 'b'; if (i < 0 || i > 't' - 'b') return NULL; spec = special_sections[i]; if (spec == NULL) return NULL; return _bfd_elf_get_special_section (sec->name, spec, sec->use_rela_p); } bfd_boolean _bfd_elf_new_section_hook (bfd *abfd, asection *sec) { struct bfd_elf_section_data *sdata; const struct elf_backend_data *bed; const struct bfd_elf_special_section *ssect; sdata = (struct bfd_elf_section_data *) sec->used_by_bfd; if (sdata == NULL) { sdata = bfd_zalloc (abfd, sizeof (*sdata)); if (sdata == NULL) return FALSE; sec->used_by_bfd = sdata; } /* Indicate whether or not this section should use RELA relocations. */ bed = get_elf_backend_data (abfd); sec->use_rela_p = bed->default_use_rela_p; /* When we read a file, we don't need to set ELF section type and flags. They will be overridden in _bfd_elf_make_section_from_shdr anyway. We will set ELF section type and flags for all linker created sections. If user specifies BFD section flags, we will set ELF section type and flags based on BFD section flags in elf_fake_sections. */ if ((!sec->flags && abfd->direction != read_direction) || (sec->flags & SEC_LINKER_CREATED) != 0) { ssect = (*bed->get_sec_type_attr) (abfd, sec); if (ssect != NULL) { elf_section_type (sec) = ssect->type; elf_section_flags (sec) = ssect->attr; } } return _bfd_generic_new_section_hook (abfd, sec); } /* Create a new bfd section from an ELF program header. Since program segments have no names, we generate a synthetic name of the form segment<NUM>, where NUM is generally the index in the program header table. For segments that are split (see below) we generate the names segment<NUM>a and segment<NUM>b. Note that some program segments may have a file size that is different than (less than) the memory size. All this means is that at execution the system must allocate the amount of memory specified by the memory size, but only initialize it with the first "file size" bytes read from the file. This would occur, for example, with program segments consisting of combined data+bss. To handle the above situation, this routine generates TWO bfd sections for the single program segment. The first has the length specified by the file size of the segment, and the second has the length specified by the difference between the two sizes. In effect, the segment is split into its initialized and uninitialized parts. */ bfd_boolean _bfd_elf_make_section_from_phdr (bfd *abfd, Elf_Internal_Phdr *hdr, int index, const char *typename) { asection *newsect; char *name; char namebuf[64]; size_t len; int split; split = ((hdr->p_memsz > 0) && (hdr->p_filesz > 0) && (hdr->p_memsz > hdr->p_filesz)); sprintf (namebuf, "%s%d%s", typename, index, split ? 
"a" : ""); len = strlen (namebuf) + 1; name = bfd_alloc (abfd, len); if (!name) return FALSE; memcpy (name, namebuf, len); newsect = bfd_make_section (abfd, name); if (newsect == NULL) return FALSE; newsect->vma = hdr->p_vaddr; newsect->lma = hdr->p_paddr; newsect->size = hdr->p_filesz; newsect->filepos = hdr->p_offset; newsect->flags |= SEC_HAS_CONTENTS; newsect->alignment_power = bfd_log2 (hdr->p_align); if (hdr->p_type == PT_LOAD) { newsect->flags |= SEC_ALLOC; newsect->flags |= SEC_LOAD; if (hdr->p_flags & PF_X) { /* FIXME: all we known is that it has execute PERMISSION, may be data. */ newsect->flags |= SEC_CODE; } } if (!(hdr->p_flags & PF_W)) { newsect->flags |= SEC_READONLY; } if (split) { sprintf (namebuf, "%s%db", typename, index); len = strlen (namebuf) + 1; name = bfd_alloc (abfd, len); if (!name) return FALSE; memcpy (name, namebuf, len); newsect = bfd_make_section (abfd, name); if (newsect == NULL) return FALSE; newsect->vma = hdr->p_vaddr + hdr->p_filesz; newsect->lma = hdr->p_paddr + hdr->p_filesz; newsect->size = hdr->p_memsz - hdr->p_filesz; if (hdr->p_type == PT_LOAD) { newsect->flags |= SEC_ALLOC; if (hdr->p_flags & PF_X) newsect->flags |= SEC_CODE; } if (!(hdr->p_flags & PF_W)) newsect->flags |= SEC_READONLY; } return TRUE; } bfd_boolean bfd_section_from_phdr (bfd *abfd, Elf_Internal_Phdr *hdr, int index) { const struct elf_backend_data *bed; switch (hdr->p_type) { case PT_NULL: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "null"); case PT_LOAD: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "load"); case PT_DYNAMIC: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "dynamic"); case PT_INTERP: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "interp"); case PT_NOTE: if (! _bfd_elf_make_section_from_phdr (abfd, hdr, index, "note")) return FALSE; if (! elfcore_read_notes (abfd, hdr->p_offset, hdr->p_filesz)) return FALSE; return TRUE; case PT_SHLIB: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "shlib"); case PT_PHDR: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "phdr"); case PT_GNU_EH_FRAME: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "eh_frame_hdr"); case PT_GNU_STACK: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "stack"); case PT_GNU_RELRO: return _bfd_elf_make_section_from_phdr (abfd, hdr, index, "relro"); default: /* Check for any processor-specific program segment types. */ bed = get_elf_backend_data (abfd); return bed->elf_backend_section_from_phdr (abfd, hdr, index, "proc"); } } /* Initialize REL_HDR, the section-header for new section, containing relocations against ASECT. If USE_RELA_P is TRUE, we use RELA relocations; otherwise, we use REL relocations. */ bfd_boolean _bfd_elf_init_reloc_shdr (bfd *abfd, Elf_Internal_Shdr *rel_hdr, asection *asect, bfd_boolean use_rela_p) { char *name; const struct elf_backend_data *bed = get_elf_backend_data (abfd); bfd_size_type amt = sizeof ".rela" + strlen (asect->name); name = bfd_alloc (abfd, amt); if (name == NULL) return FALSE; sprintf (name, "%s%s", use_rela_p ? ".rela" : ".rel", asect->name); rel_hdr->sh_name = (unsigned int) _bfd_elf_strtab_add (elf_shstrtab (abfd), name, FALSE); if (rel_hdr->sh_name == (unsigned int) -1) return FALSE; rel_hdr->sh_type = use_rela_p ? SHT_RELA : SHT_REL; rel_hdr->sh_entsize = (use_rela_p ? 
bed->s->sizeof_rela : bed->s->sizeof_rel); rel_hdr->sh_addralign = 1 << bed->s->log_file_align; rel_hdr->sh_flags = 0; rel_hdr->sh_addr = 0; rel_hdr->sh_size = 0; rel_hdr->sh_offset = 0; return TRUE; } /* Set up an ELF internal section header for a section. */ static void elf_fake_sections (bfd *abfd, asection *asect, void *failedptrarg) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); bfd_boolean *failedptr = failedptrarg; Elf_Internal_Shdr *this_hdr; unsigned int sh_type; if (*failedptr) { /* We already failed; just get out of the bfd_map_over_sections loop. */ return; } this_hdr = &elf_section_data (asect)->this_hdr; this_hdr->sh_name = (unsigned int) _bfd_elf_strtab_add (elf_shstrtab (abfd), asect->name, FALSE); if (this_hdr->sh_name == (unsigned int) -1) { *failedptr = TRUE; return; } /* Don't clear sh_flags. Assembler may set additional bits. */ if ((asect->flags & SEC_ALLOC) != 0 || asect->user_set_vma) this_hdr->sh_addr = asect->vma; else this_hdr->sh_addr = 0; this_hdr->sh_offset = 0; this_hdr->sh_size = asect->size; this_hdr->sh_link = 0; this_hdr->sh_addralign = 1 << asect->alignment_power; /* The sh_entsize and sh_info fields may have been set already by copy_private_section_data. */ this_hdr->bfd_section = asect; this_hdr->contents = NULL; /* If the section type is unspecified, we set it based on asect->flags. */ if (this_hdr->sh_type == SHT_NULL) { if ((asect->flags & SEC_GROUP) != 0) this_hdr->sh_type = SHT_GROUP; else if ((asect->flags & SEC_ALLOC) != 0 && (((asect->flags & (SEC_LOAD | SEC_HAS_CONTENTS)) == 0) || (asect->flags & SEC_NEVER_LOAD) != 0)) this_hdr->sh_type = SHT_NOBITS; else this_hdr->sh_type = SHT_PROGBITS; } switch (this_hdr->sh_type) { default: break; case SHT_STRTAB: case SHT_INIT_ARRAY: case SHT_FINI_ARRAY: case SHT_PREINIT_ARRAY: case SHT_NOTE: case SHT_NOBITS: case SHT_PROGBITS: break; case SHT_HASH: this_hdr->sh_entsize = bed->s->sizeof_hash_entry; break; case SHT_DYNSYM: this_hdr->sh_entsize = bed->s->sizeof_sym; break; case SHT_DYNAMIC: this_hdr->sh_entsize = bed->s->sizeof_dyn; break; case SHT_RELA: if (get_elf_backend_data (abfd)->may_use_rela_p) this_hdr->sh_entsize = bed->s->sizeof_rela; break; case SHT_REL: if (get_elf_backend_data (abfd)->may_use_rel_p) this_hdr->sh_entsize = bed->s->sizeof_rel; break; case SHT_GNU_versym: this_hdr->sh_entsize = sizeof (Elf_External_Versym); break; case SHT_GNU_verdef: this_hdr->sh_entsize = 0; /* objcopy or strip will copy over sh_info, but may not set cverdefs. The linker will set cverdefs, but sh_info will be zero. */ if (this_hdr->sh_info == 0) this_hdr->sh_info = elf_tdata (abfd)->cverdefs; else BFD_ASSERT (elf_tdata (abfd)->cverdefs == 0 || this_hdr->sh_info == elf_tdata (abfd)->cverdefs); break; case SHT_GNU_verneed: this_hdr->sh_entsize = 0; /* objcopy or strip will copy over sh_info, but may not set cverrefs. The linker will set cverrefs, but sh_info will be zero. */ if (this_hdr->sh_info == 0) this_hdr->sh_info = elf_tdata (abfd)->cverrefs; else BFD_ASSERT (elf_tdata (abfd)->cverrefs == 0 || this_hdr->sh_info == elf_tdata (abfd)->cverrefs); break; case SHT_GROUP: this_hdr->sh_entsize = GRP_ENTRY_SIZE; break; case SHT_GNU_HASH: this_hdr->sh_entsize = bed->s->arch_size == 64 ? 
0 : 4; break; } if ((asect->flags & SEC_ALLOC) != 0) this_hdr->sh_flags |= SHF_ALLOC; if ((asect->flags & SEC_READONLY) == 0) this_hdr->sh_flags |= SHF_WRITE; if ((asect->flags & SEC_CODE) != 0) this_hdr->sh_flags |= SHF_EXECINSTR; if ((asect->flags & SEC_MERGE) != 0) { this_hdr->sh_flags |= SHF_MERGE; this_hdr->sh_entsize = asect->entsize; if ((asect->flags & SEC_STRINGS) != 0) this_hdr->sh_flags |= SHF_STRINGS; } if ((asect->flags & SEC_GROUP) == 0 && elf_group_name (asect) != NULL) this_hdr->sh_flags |= SHF_GROUP; if ((asect->flags & SEC_THREAD_LOCAL) != 0) { this_hdr->sh_flags |= SHF_TLS; if (asect->size == 0 && (asect->flags & SEC_HAS_CONTENTS) == 0) { struct bfd_link_order *o = asect->map_tail.link_order; this_hdr->sh_size = 0; if (o != NULL) { this_hdr->sh_size = o->offset + o->size; if (this_hdr->sh_size != 0) this_hdr->sh_type = SHT_NOBITS; } } } /* Check for processor-specific section types. */ sh_type = this_hdr->sh_type; if (bed->elf_backend_fake_sections && !(*bed->elf_backend_fake_sections) (abfd, this_hdr, asect)) *failedptr = TRUE; if (sh_type == SHT_NOBITS && asect->size != 0) { /* Don't change the header type from NOBITS if we are being called for objcopy --only-keep-debug. */ this_hdr->sh_type = sh_type; } /* If the section has relocs, set up a section header for the SHT_REL[A] section. If two relocation sections are required for this section, it is up to the processor-specific back-end to create the other. */ if ((asect->flags & SEC_RELOC) != 0 && !_bfd_elf_init_reloc_shdr (abfd, &elf_section_data (asect)->rel_hdr, asect, asect->use_rela_p)) *failedptr = TRUE; } /* Fill in the contents of a SHT_GROUP section. */ void bfd_elf_set_group_contents (bfd *abfd, asection *sec, void *failedptrarg) { bfd_boolean *failedptr = failedptrarg; unsigned long symindx; asection *elt, *first; unsigned char *loc; bfd_boolean gas; /* Ignore linker created group section. See elfNN_ia64_object_p in elfxx-ia64.c. */ if (((sec->flags & (SEC_GROUP | SEC_LINKER_CREATED)) != SEC_GROUP) || *failedptr) return; symindx = 0; if (elf_group_id (sec) != NULL) symindx = elf_group_id (sec)->udata.i; if (symindx == 0) { /* If called from the assembler, swap_out_syms will have set up elf_section_syms; If called for "ld -r", use target_index. */ if (elf_section_syms (abfd) != NULL) symindx = elf_section_syms (abfd)[sec->index]->udata.i; else symindx = sec->target_index; } elf_section_data (sec)->this_hdr.sh_info = symindx; /* The contents won't be allocated for "ld -r" or objcopy. */ gas = TRUE; if (sec->contents == NULL) { gas = FALSE; sec->contents = bfd_alloc (abfd, sec->size); /* Arrange for the section to be written out. */ elf_section_data (sec)->this_hdr.contents = sec->contents; if (sec->contents == NULL) { *failedptr = TRUE; return; } } loc = sec->contents + sec->size; /* Get the pointer to the first section in the group that gas squirreled away here. objcopy arranges for this to be set to the start of the input section group. */ first = elt = elf_next_in_group (sec); /* First element is a flag word. Rest of section is elf section indices for all the sections of the group. Write them backwards just to keep the group in the same order as given in .section directives, not that it matters. 
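   For illustration (member names made up): a two-member COMDAT group ends
   up holding three four-byte words, the GRP_COMDAT flag word followed by
   the ELF section indices of .text.foo and .rela.text.foo, written here
   from the end of the section towards its start.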
*/ while (elt != NULL) { asection *s; unsigned int idx; loc -= 4; s = elt; if (!gas) s = s->output_section; idx = 0; if (s != NULL) idx = elf_section_data (s)->this_idx; H_PUT_32 (abfd, idx, loc); elt = elf_next_in_group (elt); if (elt == first) break; } if ((loc -= 4) != sec->contents) abort (); H_PUT_32 (abfd, sec->flags & SEC_LINK_ONCE ? GRP_COMDAT : 0, loc); } /* Assign all ELF section numbers. The dummy first section is handled here too. The link/info pointers for the standard section types are filled in here too, while we're at it. */ static bfd_boolean assign_section_numbers (bfd *abfd, struct bfd_link_info *link_info) { struct elf_obj_tdata *t = elf_tdata (abfd); asection *sec; unsigned int section_number, secn; Elf_Internal_Shdr **i_shdrp; struct bfd_elf_section_data *d; section_number = 1; _bfd_elf_strtab_clear_all_refs (elf_shstrtab (abfd)); /* SHT_GROUP sections are in relocatable files only. */ if (link_info == NULL || link_info->relocatable) { /* Put SHT_GROUP sections first. */ for (sec = abfd->sections; sec != NULL; sec = sec->next) { d = elf_section_data (sec); if (d->this_hdr.sh_type == SHT_GROUP) { if (sec->flags & SEC_LINKER_CREATED) { /* Remove the linker created SHT_GROUP sections. */ bfd_section_list_remove (abfd, sec); abfd->section_count--; } else { if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; d->this_idx = section_number++; } } } } for (sec = abfd->sections; sec; sec = sec->next) { d = elf_section_data (sec); if (d->this_hdr.sh_type != SHT_GROUP) { if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; d->this_idx = section_number++; } _bfd_elf_strtab_addref (elf_shstrtab (abfd), d->this_hdr.sh_name); if ((sec->flags & SEC_RELOC) == 0) d->rel_idx = 0; else { if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; d->rel_idx = section_number++; _bfd_elf_strtab_addref (elf_shstrtab (abfd), d->rel_hdr.sh_name); } if (d->rel_hdr2) { if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; d->rel_idx2 = section_number++; _bfd_elf_strtab_addref (elf_shstrtab (abfd), d->rel_hdr2->sh_name); } else d->rel_idx2 = 0; } if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; t->shstrtab_section = section_number++; _bfd_elf_strtab_addref (elf_shstrtab (abfd), t->shstrtab_hdr.sh_name); elf_elfheader (abfd)->e_shstrndx = t->shstrtab_section; if (bfd_get_symcount (abfd) > 0) { if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; t->symtab_section = section_number++; _bfd_elf_strtab_addref (elf_shstrtab (abfd), t->symtab_hdr.sh_name); if (section_number > SHN_LORESERVE - 2) { if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; t->symtab_shndx_section = section_number++; t->symtab_shndx_hdr.sh_name = (unsigned int) _bfd_elf_strtab_add (elf_shstrtab (abfd), ".symtab_shndx", FALSE); if (t->symtab_shndx_hdr.sh_name == (unsigned int) -1) return FALSE; } if (section_number == SHN_LORESERVE) section_number += SHN_HIRESERVE + 1 - SHN_LORESERVE; t->strtab_section = section_number++; _bfd_elf_strtab_addref (elf_shstrtab (abfd), t->strtab_hdr.sh_name); } _bfd_elf_strtab_finalize (elf_shstrtab (abfd)); t->shstrtab_hdr.sh_size = _bfd_elf_strtab_size (elf_shstrtab (abfd)); elf_numsections (abfd) = section_number; elf_elfheader (abfd)->e_shnum = section_number; if (section_number > SHN_LORESERVE) elf_elfheader (abfd)->e_shnum -= SHN_HIRESERVE + 
1 - SHN_LORESERVE; /* Set up the list of section header pointers, in agreement with the indices. */ i_shdrp = bfd_zalloc2 (abfd, section_number, sizeof (Elf_Internal_Shdr *)); if (i_shdrp == NULL) return FALSE; i_shdrp[0] = bfd_zalloc (abfd, sizeof (Elf_Internal_Shdr)); if (i_shdrp[0] == NULL) { bfd_release (abfd, i_shdrp); return FALSE; } elf_elfsections (abfd) = i_shdrp; i_shdrp[t->shstrtab_section] = &t->shstrtab_hdr; if (bfd_get_symcount (abfd) > 0) { i_shdrp[t->symtab_section] = &t->symtab_hdr; if (elf_numsections (abfd) > SHN_LORESERVE) { i_shdrp[t->symtab_shndx_section] = &t->symtab_shndx_hdr; t->symtab_shndx_hdr.sh_link = t->symtab_section; } i_shdrp[t->strtab_section] = &t->strtab_hdr; t->symtab_hdr.sh_link = t->strtab_section; } for (sec = abfd->sections; sec; sec = sec->next) { struct bfd_elf_section_data *d = elf_section_data (sec); asection *s; const char *name; i_shdrp[d->this_idx] = &d->this_hdr; if (d->rel_idx != 0) i_shdrp[d->rel_idx] = &d->rel_hdr; if (d->rel_idx2 != 0) i_shdrp[d->rel_idx2] = d->rel_hdr2; /* Fill in the sh_link and sh_info fields while we're at it. */ /* sh_link of a reloc section is the section index of the symbol table. sh_info is the section index of the section to which the relocation entries apply. */ if (d->rel_idx != 0) { d->rel_hdr.sh_link = t->symtab_section; d->rel_hdr.sh_info = d->this_idx; } if (d->rel_idx2 != 0) { d->rel_hdr2->sh_link = t->symtab_section; d->rel_hdr2->sh_info = d->this_idx; } /* We need to set up sh_link for SHF_LINK_ORDER. */ if ((d->this_hdr.sh_flags & SHF_LINK_ORDER) != 0) { s = elf_linked_to_section (sec); if (s) { /* elf_linked_to_section points to the input section. */ if (link_info != NULL) { /* Check discarded linkonce section. */ if (elf_discarded_section (s)) { asection *kept; (*_bfd_error_handler) (_("%B: sh_link of section `%A' points to discarded section `%A' of `%B'"), abfd, d->this_hdr.bfd_section, s, s->owner); /* Point to the kept section if it has the same size as the discarded one. */ kept = _bfd_elf_check_kept_section (s, link_info); if (kept == NULL) { bfd_set_error (bfd_error_bad_value); return FALSE; } s = kept; } s = s->output_section; BFD_ASSERT (s != NULL); } else { /* Handle objcopy. */ if (s->output_section == NULL) { (*_bfd_error_handler) (_("%B: sh_link of section `%A' points to removed section `%A' of `%B'"), abfd, d->this_hdr.bfd_section, s, s->owner); bfd_set_error (bfd_error_bad_value); return FALSE; } s = s->output_section; } d->this_hdr.sh_link = elf_section_data (s)->this_idx; } else { /* PR 290: The Intel C compiler generates SHT_IA_64_UNWIND with SHF_LINK_ORDER. But it doesn't set the sh_link or sh_info fields. Hence we could get the situation where s is NULL. */ const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (bed->link_order_error_handler) bed->link_order_error_handler (_("%B: warning: sh_link not set for section `%A'"), abfd, sec); } } switch (d->this_hdr.sh_type) { case SHT_REL: case SHT_RELA: /* A reloc section which we are treating as a normal BFD section. sh_link is the section index of the symbol table. sh_info is the section index of the section to which the relocation entries apply. We assume that an allocated reloc section uses the dynamic symbol table. FIXME: How can we be sure? */ s = bfd_get_section_by_name (abfd, ".dynsym"); if (s != NULL) d->this_hdr.sh_link = elf_section_data (s)->this_idx; /* We look up the section the relocs apply to by name. 
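   For example, a reloc section named ".rela.text" has its 5-character
   ".rela" prefix skipped (4 characters for ".rel"), and sh_info is then
   set from the section index of ".text".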
*/ name = sec->name; if (d->this_hdr.sh_type == SHT_REL) name += 4; else name += 5; s = bfd_get_section_by_name (abfd, name); if (s != NULL) d->this_hdr.sh_info = elf_section_data (s)->this_idx; break; case SHT_STRTAB: /* We assume that a section named .stab*str is a stabs string section. We look for a section with the same name but without the trailing ``str'', and set its sh_link field to point to this section. */ if (CONST_STRNEQ (sec->name, ".stab") && strcmp (sec->name + strlen (sec->name) - 3, "str") == 0) { size_t len; char *alc; len = strlen (sec->name); alc = bfd_malloc (len - 2); if (alc == NULL) return FALSE; memcpy (alc, sec->name, len - 3); alc[len - 3] = '\0'; s = bfd_get_section_by_name (abfd, alc); free (alc); if (s != NULL) { elf_section_data (s)->this_hdr.sh_link = d->this_idx; /* This is a .stab section. */ if (elf_section_data (s)->this_hdr.sh_entsize == 0) elf_section_data (s)->this_hdr.sh_entsize = 4 + 2 * bfd_get_arch_size (abfd) / 8; } } break; case SHT_DYNAMIC: case SHT_DYNSYM: case SHT_GNU_verneed: case SHT_GNU_verdef: /* sh_link is the section header index of the string table used for the dynamic entries, or the symbol table, or the version strings. */ s = bfd_get_section_by_name (abfd, ".dynstr"); if (s != NULL) d->this_hdr.sh_link = elf_section_data (s)->this_idx; break; case SHT_GNU_LIBLIST: /* sh_link is the section header index of the prelink library list used for the dynamic entries, or the symbol table, or the version strings. */ s = bfd_get_section_by_name (abfd, (sec->flags & SEC_ALLOC) ? ".dynstr" : ".gnu.libstr"); if (s != NULL) d->this_hdr.sh_link = elf_section_data (s)->this_idx; break; case SHT_HASH: case SHT_GNU_HASH: case SHT_GNU_versym: /* sh_link is the section header index of the symbol table this hash table or version table is for. */ s = bfd_get_section_by_name (abfd, ".dynsym"); if (s != NULL) d->this_hdr.sh_link = elf_section_data (s)->this_idx; break; case SHT_GROUP: d->this_hdr.sh_link = t->symtab_section; } } for (secn = 1; secn < section_number; ++secn) if (i_shdrp[secn] == NULL) i_shdrp[secn] = i_shdrp[0]; else i_shdrp[secn]->sh_name = _bfd_elf_strtab_offset (elf_shstrtab (abfd), i_shdrp[secn]->sh_name); return TRUE; } /* Map symbol from it's internal number to the external number, moving all local symbols to be at the head of the list. */ static bfd_boolean sym_is_global (bfd *abfd, asymbol *sym) { /* If the backend has a special mapping, use it. */ const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (bed->elf_backend_sym_is_global) return (*bed->elf_backend_sym_is_global) (abfd, sym); return ((sym->flags & (BSF_GLOBAL | BSF_WEAK)) != 0 || bfd_is_und_section (bfd_get_section (sym)) || bfd_is_com_section (bfd_get_section (sym))); } /* Don't output section symbols for sections that are not going to be output. Also, don't output section symbols for reloc and other special sections. 
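   Roughly, a section symbol is dropped here if it has acquired a non-zero
   value, or if it belongs to an input section of another bfd that does not
   map to offset zero of an output section owned by this bfd, since such a
   symbol can no longer stand for the plain output section symbol emitted
   later.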
*/ static bfd_boolean ignore_section_sym (bfd *abfd, asymbol *sym) { return ((sym->flags & BSF_SECTION_SYM) != 0 && (sym->value != 0 || (sym->section->owner != abfd && (sym->section->output_section->owner != abfd || sym->section->output_offset != 0)))); } static bfd_boolean elf_map_symbols (bfd *abfd) { unsigned int symcount = bfd_get_symcount (abfd); asymbol **syms = bfd_get_outsymbols (abfd); asymbol **sect_syms; unsigned int num_locals = 0; unsigned int num_globals = 0; unsigned int num_locals2 = 0; unsigned int num_globals2 = 0; int max_index = 0; unsigned int idx; asection *asect; asymbol **new_syms; #ifdef DEBUG fprintf (stderr, "elf_map_symbols\n"); fflush (stderr); #endif for (asect = abfd->sections; asect; asect = asect->next) { if (max_index < asect->index) max_index = asect->index; } max_index++; sect_syms = bfd_zalloc2 (abfd, max_index, sizeof (asymbol *)); if (sect_syms == NULL) return FALSE; elf_section_syms (abfd) = sect_syms; elf_num_section_syms (abfd) = max_index; /* Init sect_syms entries for any section symbols we have already decided to output. */ for (idx = 0; idx < symcount; idx++) { asymbol *sym = syms[idx]; if ((sym->flags & BSF_SECTION_SYM) != 0 && !ignore_section_sym (abfd, sym)) { asection *sec = sym->section; if (sec->owner != abfd) sec = sec->output_section; sect_syms[sec->index] = syms[idx]; } } /* Classify all of the symbols. */ for (idx = 0; idx < symcount; idx++) { if (ignore_section_sym (abfd, syms[idx])) continue; if (!sym_is_global (abfd, syms[idx])) num_locals++; else num_globals++; } /* We will be adding a section symbol for each normal BFD section. Most sections will already have a section symbol in outsymbols, but eg. SHT_GROUP sections will not, and we need the section symbol mapped at least in that case. */ for (asect = abfd->sections; asect; asect = asect->next) { if (sect_syms[asect->index] == NULL) { if (!sym_is_global (abfd, asect->symbol)) num_locals++; else num_globals++; } } /* Now sort the symbols so the local symbols are first. */ new_syms = bfd_alloc2 (abfd, num_locals + num_globals, sizeof (asymbol *)); if (new_syms == NULL) return FALSE; for (idx = 0; idx < symcount; idx++) { asymbol *sym = syms[idx]; unsigned int i; if (ignore_section_sym (abfd, sym)) continue; if (!sym_is_global (abfd, sym)) i = num_locals2++; else i = num_locals + num_globals2++; new_syms[i] = sym; sym->udata.i = i + 1; } for (asect = abfd->sections; asect; asect = asect->next) { if (sect_syms[asect->index] == NULL) { asymbol *sym = asect->symbol; unsigned int i; sect_syms[asect->index] = sym; if (!sym_is_global (abfd, sym)) i = num_locals2++; else i = num_locals + num_globals2++; new_syms[i] = sym; sym->udata.i = i + 1; } } bfd_set_symtab (abfd, new_syms, num_locals + num_globals); elf_num_locals (abfd) = num_locals; elf_num_globals (abfd) = num_globals; return TRUE; } /* Align to the maximum file alignment that could be required for any ELF data structure. */ static inline file_ptr align_file_position (file_ptr off, int align) { return (off + align - 1) & ~(align - 1); } /* Assign a file position to a section, optionally aligning to the required section alignment. 
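   Illustrative numbers: with OFFSET 0x1234 and sh_addralign 0x10 the
   section is placed at 0x1240; the returned offset then advances past
   sh_size, except for SHT_NOBITS sections, which occupy no file space.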
*/ file_ptr _bfd_elf_assign_file_position_for_section (Elf_Internal_Shdr *i_shdrp, file_ptr offset, bfd_boolean align) { if (align) { unsigned int al; al = i_shdrp->sh_addralign; if (al > 1) offset = BFD_ALIGN (offset, al); } i_shdrp->sh_offset = offset; if (i_shdrp->bfd_section != NULL) i_shdrp->bfd_section->filepos = offset; if (i_shdrp->sh_type != SHT_NOBITS) offset += i_shdrp->sh_size; return offset; } /* Compute the file positions we are going to put the sections at, and otherwise prepare to begin writing out the ELF file. If LINK_INFO is not NULL, this is being called by the ELF backend linker. */ bfd_boolean _bfd_elf_compute_section_file_positions (bfd *abfd, struct bfd_link_info *link_info) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); bfd_boolean failed; struct bfd_strtab_hash *strtab = NULL; Elf_Internal_Shdr *shstrtab_hdr; if (abfd->output_has_begun) return TRUE; /* Do any elf backend specific processing first. */ if (bed->elf_backend_begin_write_processing) (*bed->elf_backend_begin_write_processing) (abfd, link_info); if (! prep_headers (abfd)) return FALSE; /* Post process the headers if necessary. */ if (bed->elf_backend_post_process_headers) (*bed->elf_backend_post_process_headers) (abfd, link_info); failed = FALSE; bfd_map_over_sections (abfd, elf_fake_sections, &failed); if (failed) return FALSE; if (!assign_section_numbers (abfd, link_info)) return FALSE; /* The backend linker builds symbol table information itself. */ if (link_info == NULL && bfd_get_symcount (abfd) > 0) { /* Non-zero if doing a relocatable link. */ int relocatable_p = ! (abfd->flags & (EXEC_P | DYNAMIC)); if (! swap_out_syms (abfd, &strtab, relocatable_p)) return FALSE; } if (link_info == NULL) { bfd_map_over_sections (abfd, bfd_elf_set_group_contents, &failed); if (failed) return FALSE; } shstrtab_hdr = &elf_tdata (abfd)->shstrtab_hdr; /* sh_name was set in prep_headers. */ shstrtab_hdr->sh_type = SHT_STRTAB; shstrtab_hdr->sh_flags = 0; shstrtab_hdr->sh_addr = 0; shstrtab_hdr->sh_size = _bfd_elf_strtab_size (elf_shstrtab (abfd)); shstrtab_hdr->sh_entsize = 0; shstrtab_hdr->sh_link = 0; shstrtab_hdr->sh_info = 0; /* sh_offset is set in assign_file_positions_except_relocs. */ shstrtab_hdr->sh_addralign = 1; if (!assign_file_positions_except_relocs (abfd, link_info)) return FALSE; if (link_info == NULL && bfd_get_symcount (abfd) > 0) { file_ptr off; Elf_Internal_Shdr *hdr; off = elf_tdata (abfd)->next_file_pos; hdr = &elf_tdata (abfd)->symtab_hdr; off = _bfd_elf_assign_file_position_for_section (hdr, off, TRUE); hdr = &elf_tdata (abfd)->symtab_shndx_hdr; if (hdr->sh_size != 0) off = _bfd_elf_assign_file_position_for_section (hdr, off, TRUE); hdr = &elf_tdata (abfd)->strtab_hdr; off = _bfd_elf_assign_file_position_for_section (hdr, off, TRUE); elf_tdata (abfd)->next_file_pos = off; /* Now that we know where the .strtab section goes, write it out. */ if (bfd_seek (abfd, hdr->sh_offset, SEEK_SET) != 0 || ! _bfd_stringtab_emit (abfd, strtab)) return FALSE; _bfd_stringtab_free (strtab); } abfd->output_has_begun = TRUE; return TRUE; } /* Make an initial estimate of the size of the program header. If we get the number wrong here, we'll redo section placement. */ static bfd_size_type get_program_header_size (bfd *abfd, struct bfd_link_info *info) { size_t segs; asection *s; const struct elf_backend_data *bed; /* Assume we will need exactly two PT_LOAD segments: one for text and one for data. 
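   For a typical dynamically linked executable this base estimate then grows
   below by PT_PHDR, PT_INTERP, PT_DYNAMIC, one PT_NOTE per loadable .note
   section, and possibly PT_GNU_RELRO, PT_GNU_EH_FRAME, PT_GNU_STACK and
   PT_TLS, exactly as enumerated in the code that follows.  */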
*/ segs = 2; s = bfd_get_section_by_name (abfd, ".interp"); if (s != NULL && (s->flags & SEC_LOAD) != 0) { /* If we have a loadable interpreter section, we need a PT_INTERP segment. In this case, assume we also need a PT_PHDR segment, although that may not be true for all targets. */ segs += 2; } if (bfd_get_section_by_name (abfd, ".dynamic") != NULL) { /* We need a PT_DYNAMIC segment. */ ++segs; if (elf_tdata (abfd)->relro) { /* We need a PT_GNU_RELRO segment only when there is a PT_DYNAMIC segment. */ ++segs; } } if (elf_tdata (abfd)->eh_frame_hdr) { /* We need a PT_GNU_EH_FRAME segment. */ ++segs; } if (elf_tdata (abfd)->stack_flags) { /* We need a PT_GNU_STACK segment. */ ++segs; } for (s = abfd->sections; s != NULL; s = s->next) { if ((s->flags & SEC_LOAD) != 0 && CONST_STRNEQ (s->name, ".note")) { /* We need a PT_NOTE segment. */ ++segs; } } for (s = abfd->sections; s != NULL; s = s->next) { if (s->flags & SEC_THREAD_LOCAL) { /* We need a PT_TLS segment. */ ++segs; break; } } /* Let the backend count up any program headers it might need. */ bed = get_elf_backend_data (abfd); if (bed->elf_backend_additional_program_headers) { int a; a = (*bed->elf_backend_additional_program_headers) (abfd, info); if (a == -1) abort (); segs += a; } return segs * bed->s->sizeof_phdr; } /* Create a mapping from a set of sections to a program segment. */ static struct elf_segment_map * make_mapping (bfd *abfd, asection **sections, unsigned int from, unsigned int to, bfd_boolean phdr) { struct elf_segment_map *m; unsigned int i; asection **hdrpp; bfd_size_type amt; amt = sizeof (struct elf_segment_map); amt += (to - from - 1) * sizeof (asection *); m = bfd_zalloc (abfd, amt); if (m == NULL) return NULL; m->next = NULL; m->p_type = PT_LOAD; for (i = from, hdrpp = sections + from; i < to; i++, hdrpp++) m->sections[i - from] = *hdrpp; m->count = to - from; if (from == 0 && phdr) { /* Include the headers in the first PT_LOAD segment. */ m->includes_filehdr = 1; m->includes_phdrs = 1; } return m; } /* Create the PT_DYNAMIC segment, which includes DYNSEC. Returns NULL on failure. */ struct elf_segment_map * _bfd_elf_make_dynamic_segment (bfd *abfd, asection *dynsec) { struct elf_segment_map *m; m = bfd_zalloc (abfd, sizeof (struct elf_segment_map)); if (m == NULL) return NULL; m->next = NULL; m->p_type = PT_DYNAMIC; m->count = 1; m->sections[0] = dynsec; return m; } /* Possibly add or remove segments from the segment map. */ static bfd_boolean elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info) { struct elf_segment_map **m; const struct elf_backend_data *bed; /* The placement algorithm assumes that non allocated sections are not in PT_LOAD segments. We ensure this here by removing such sections from the segment map. We also remove excluded sections. Finally, any PT_LOAD segment without sections is removed. */ m = &elf_tdata (abfd)->segment_map; while (*m) { unsigned int i, new_count; for (new_count = 0, i = 0; i < (*m)->count; i++) { if (((*m)->sections[i]->flags & SEC_EXCLUDE) == 0 && (((*m)->sections[i]->flags & SEC_ALLOC) != 0 || (*m)->p_type != PT_LOAD)) { (*m)->sections[new_count] = (*m)->sections[i]; new_count++; } } (*m)->count = new_count; if ((*m)->p_type == PT_LOAD && (*m)->count == 0) *m = (*m)->next; else m = &(*m)->next; } bed = get_elf_backend_data (abfd); if (bed->elf_backend_modify_segment_map != NULL) { if (!(*bed->elf_backend_modify_segment_map) (abfd, info)) return FALSE; } return TRUE; } /* Set up a mapping from BFD sections to program segments. 
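   The usual outcome for a demand paged executable is, roughly, one
   read/execute PT_LOAD covering .interp, .text, .rodata and friends, a
   second writable PT_LOAD covering .data and .bss, plus the PT_PHDR,
   PT_INTERP, PT_DYNAMIC, PT_NOTE, PT_TLS, PT_GNU_EH_FRAME, PT_GNU_STACK
   and PT_GNU_RELRO entries built further down; the exact split depends on
   the section addresses and flags, so this is illustrative only.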
*/ bfd_boolean _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info) { unsigned int count; struct elf_segment_map *m; asection **sections = NULL; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (elf_tdata (abfd)->segment_map == NULL && bfd_count_sections (abfd) != 0) { asection *s; unsigned int i; struct elf_segment_map *mfirst; struct elf_segment_map **pm; asection *last_hdr; bfd_vma last_size; unsigned int phdr_index; bfd_vma maxpagesize; asection **hdrpp; bfd_boolean phdr_in_segment = TRUE; bfd_boolean writable; int tls_count = 0; asection *first_tls = NULL; asection *dynsec, *eh_frame_hdr; bfd_size_type amt; /* Select the allocated sections, and sort them. */ sections = bfd_malloc2 (bfd_count_sections (abfd), sizeof (asection *)); if (sections == NULL) goto error_return; i = 0; for (s = abfd->sections; s != NULL; s = s->next) { if ((s->flags & SEC_ALLOC) != 0) { sections[i] = s; ++i; } } BFD_ASSERT (i <= bfd_count_sections (abfd)); count = i; qsort (sections, (size_t) count, sizeof (asection *), elf_sort_sections); /* Build the mapping. */ mfirst = NULL; pm = &mfirst; /* If we have a .interp section, then create a PT_PHDR segment for the program headers and a PT_INTERP segment for the .interp section. */ s = bfd_get_section_by_name (abfd, ".interp"); if (s != NULL && (s->flags & SEC_LOAD) != 0) { amt = sizeof (struct elf_segment_map); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_PHDR; /* FIXME: UnixWare and Solaris set PF_X, Irix 5 does not. */ m->p_flags = PF_R | PF_X; m->p_flags_valid = 1; m->includes_phdrs = 1; *pm = m; pm = &m->next; amt = sizeof (struct elf_segment_map); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_INTERP; m->count = 1; m->sections[0] = s; *pm = m; pm = &m->next; } /* Look through the sections. We put sections in the same program segment when the start of the second section can be placed within a few bytes of the end of the first section. */ last_hdr = NULL; last_size = 0; phdr_index = 0; maxpagesize = bed->maxpagesize; writable = FALSE; dynsec = bfd_get_section_by_name (abfd, ".dynamic"); if (dynsec != NULL && (dynsec->flags & SEC_LOAD) == 0) dynsec = NULL; /* Deal with -Ttext or something similar such that the first section is not adjacent to the program headers. This is an approximation, since at this point we don't know exactly how many program headers we will need. */ if (count > 0) { bfd_size_type phdr_size = elf_tdata (abfd)->program_header_size; if (phdr_size == (bfd_size_type) -1) phdr_size = get_program_header_size (abfd, info); if ((abfd->flags & D_PAGED) == 0 || sections[0]->lma < phdr_size || sections[0]->lma % maxpagesize < phdr_size % maxpagesize) phdr_in_segment = FALSE; } for (i = 0, hdrpp = sections; i < count; i++, hdrpp++) { asection *hdr; bfd_boolean new_segment; hdr = *hdrpp; /* See if this section and the last one will fit in the same segment. */ if (last_hdr == NULL) { /* If we don't have a segment yet, then we don't need a new one (we build the last one after this loop). */ new_segment = FALSE; } else if (last_hdr->lma - last_hdr->vma != hdr->lma - hdr->vma) { /* If this section has a different relation between the virtual address and the load address, then we need a new segment. 
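   A common illustration is a ROM image in which .data is loaded at an lma
   inside the flash image but runs at a RAM vma: its lma - vma bias differs
   from that of .text, so the two cannot share a PT_LOAD segment.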
*/ new_segment = TRUE; } else if (BFD_ALIGN (last_hdr->lma + last_size, maxpagesize) < BFD_ALIGN (hdr->lma, maxpagesize)) { /* If putting this section in this segment would force us to skip a page in the segment, then we need a new segment. */ new_segment = TRUE; } else if ((last_hdr->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) == 0 && (hdr->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != 0) { /* We don't want to put a loadable section after a nonloadable section in the same segment. Consider .tbss sections as loadable for this purpose. */ new_segment = TRUE; } else if ((abfd->flags & D_PAGED) == 0) { /* If the file is not demand paged, which means that we don't require the sections to be correctly aligned in the file, then there is no other reason for a new segment. */ new_segment = FALSE; } else if (! writable && (hdr->flags & SEC_READONLY) == 0 && (((last_hdr->lma + last_size - 1) & ~(maxpagesize - 1)) != (hdr->lma & ~(maxpagesize - 1)))) { /* We don't want to put a writable section in a read only segment, unless they are on the same page in memory anyhow. We already know that the last section does not bring us past the current section on the page, so the only case in which the new section is not on the same page as the previous section is when the previous section ends precisely on a page boundary. */ new_segment = TRUE; } else { /* Otherwise, we can use the same segment. */ new_segment = FALSE; } /* Allow interested parties a chance to override our decision. */ if (last_hdr && info->callbacks->override_segment_assignment) new_segment = info->callbacks->override_segment_assignment (info, abfd, hdr, last_hdr, new_segment); if (! new_segment) { if ((hdr->flags & SEC_READONLY) == 0) writable = TRUE; last_hdr = hdr; /* .tbss sections effectively have zero size. */ if ((hdr->flags & (SEC_THREAD_LOCAL | SEC_LOAD)) != SEC_THREAD_LOCAL) last_size = hdr->size; else last_size = 0; continue; } /* We need a new program segment. We must create a new program header holding all the sections from phdr_index until hdr. */ m = make_mapping (abfd, sections, phdr_index, i, phdr_in_segment); if (m == NULL) goto error_return; *pm = m; pm = &m->next; if ((hdr->flags & SEC_READONLY) == 0) writable = TRUE; else writable = FALSE; last_hdr = hdr; /* .tbss sections effectively have zero size. */ if ((hdr->flags & (SEC_THREAD_LOCAL | SEC_LOAD)) != SEC_THREAD_LOCAL) last_size = hdr->size; else last_size = 0; phdr_index = i; phdr_in_segment = FALSE; } /* Create a final PT_LOAD program segment. */ if (last_hdr != NULL) { m = make_mapping (abfd, sections, phdr_index, i, phdr_in_segment); if (m == NULL) goto error_return; *pm = m; pm = &m->next; } /* If there is a .dynamic section, throw in a PT_DYNAMIC segment. */ if (dynsec != NULL) { m = _bfd_elf_make_dynamic_segment (abfd, dynsec); if (m == NULL) goto error_return; *pm = m; pm = &m->next; } /* For each loadable .note section, add a PT_NOTE segment. We don't use bfd_get_section_by_name, because if we link together nonloadable .note sections and loadable .note sections, we will generate two .note sections in the output file. FIXME: Using names for section types is bogus anyhow. */ for (s = abfd->sections; s != NULL; s = s->next) { if ((s->flags & SEC_LOAD) != 0 && CONST_STRNEQ (s->name, ".note")) { amt = sizeof (struct elf_segment_map); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_NOTE; m->count = 1; m->sections[0] = s; *pm = m; pm = &m->next; } if (s->flags & SEC_THREAD_LOCAL) { if (! 
tls_count) first_tls = s; tls_count++; } } /* If there are any SHF_TLS output sections, add PT_TLS segment. */ if (tls_count > 0) { int i; amt = sizeof (struct elf_segment_map); amt += (tls_count - 1) * sizeof (asection *); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_TLS; m->count = tls_count; /* Mandated PF_R. */ m->p_flags = PF_R; m->p_flags_valid = 1; for (i = 0; i < tls_count; ++i) { BFD_ASSERT (first_tls->flags & SEC_THREAD_LOCAL); m->sections[i] = first_tls; first_tls = first_tls->next; } *pm = m; pm = &m->next; } /* If there is a .eh_frame_hdr section, throw in a PT_GNU_EH_FRAME segment. */ eh_frame_hdr = elf_tdata (abfd)->eh_frame_hdr; if (eh_frame_hdr != NULL && (eh_frame_hdr->output_section->flags & SEC_LOAD) != 0) { amt = sizeof (struct elf_segment_map); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_GNU_EH_FRAME; m->count = 1; m->sections[0] = eh_frame_hdr->output_section; *pm = m; pm = &m->next; } if (elf_tdata (abfd)->stack_flags) { amt = sizeof (struct elf_segment_map); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_GNU_STACK; m->p_flags = elf_tdata (abfd)->stack_flags; m->p_flags_valid = 1; *pm = m; pm = &m->next; } if (dynsec != NULL && elf_tdata (abfd)->relro) { /* We make a PT_GNU_RELRO segment only when there is a PT_DYNAMIC segment. */ amt = sizeof (struct elf_segment_map); m = bfd_zalloc (abfd, amt); if (m == NULL) goto error_return; m->next = NULL; m->p_type = PT_GNU_RELRO; m->p_flags = PF_R; m->p_flags_valid = 1; *pm = m; pm = &m->next; } free (sections); elf_tdata (abfd)->segment_map = mfirst; } if (!elf_modify_segment_map (abfd, info)) return FALSE; for (count = 0, m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next) ++count; elf_tdata (abfd)->program_header_size = count * bed->s->sizeof_phdr; return TRUE; error_return: if (sections != NULL) free (sections); return FALSE; } /* Sort sections by address. */ static int elf_sort_sections (const void *arg1, const void *arg2) { const asection *sec1 = *(const asection **) arg1; const asection *sec2 = *(const asection **) arg2; bfd_size_type size1, size2; /* Sort by LMA first, since this is the address used to place the section into a segment. */ if (sec1->lma < sec2->lma) return -1; else if (sec1->lma > sec2->lma) return 1; /* Then sort by VMA. Normally the LMA and the VMA will be the same, and this will do nothing. */ if (sec1->vma < sec2->vma) return -1; else if (sec1->vma > sec2->vma) return 1; /* Put !SEC_LOAD sections after SEC_LOAD ones. */ #define TOEND(x) (((x)->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) == 0) if (TOEND (sec1)) { if (TOEND (sec2)) { /* If the indicies are the same, do not return 0 here, but continue to try the next comparison. */ if (sec1->target_index - sec2->target_index != 0) return sec1->target_index - sec2->target_index; } else return 1; } else if (TOEND (sec2)) return -1; #undef TOEND /* Sort by size, to put zero sized sections before others at the same address. */ size1 = (sec1->flags & SEC_LOAD) ? sec1->size : 0; size2 = (sec2->flags & SEC_LOAD) ? sec2->size : 0; if (size1 < size2) return -1; if (size1 > size2) return 1; return sec1->target_index - sec2->target_index; } /* Ian Lance Taylor writes: We shouldn't be using % with a negative signed number. That's just not good. We have to make sure either that the number is not negative, or that the number has an unsigned type. When the types are all the same size they wind up as unsigned. 
When file_ptr is a larger signed type, the arithmetic winds up as signed long long, which is wrong. What we're trying to say here is something like ``increase OFF by the least amount that will cause it to be equal to the VMA modulo the page size.'' */ /* In other words, something like: vma_offset = m->sections[0]->vma % bed->maxpagesize; off_offset = off % bed->maxpagesize; if (vma_offset < off_offset) adjustment = vma_offset + bed->maxpagesize - off_offset; else adjustment = vma_offset - off_offset; which can can be collapsed into the expression below. */ static file_ptr vma_page_aligned_bias (bfd_vma vma, ufile_ptr off, bfd_vma maxpagesize) { return ((vma - off) % maxpagesize); } /* Assign file positions to the sections based on the mapping from sections to segments. This function also sets up some fields in the file header. */ static bfd_boolean assign_file_positions_for_load_sections (bfd *abfd, struct bfd_link_info *link_info) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); struct elf_segment_map *m; Elf_Internal_Phdr *phdrs; Elf_Internal_Phdr *p; file_ptr off; bfd_size_type maxpagesize; unsigned int alloc; unsigned int i, j; if (link_info == NULL && !elf_modify_segment_map (abfd, link_info)) return FALSE; alloc = 0; for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next) ++alloc; elf_elfheader (abfd)->e_phoff = bed->s->sizeof_ehdr; elf_elfheader (abfd)->e_phentsize = bed->s->sizeof_phdr; elf_elfheader (abfd)->e_phnum = alloc; if (elf_tdata (abfd)->program_header_size == (bfd_size_type) -1) elf_tdata (abfd)->program_header_size = alloc * bed->s->sizeof_phdr; else BFD_ASSERT (elf_tdata (abfd)->program_header_size >= alloc * bed->s->sizeof_phdr); if (alloc == 0) { elf_tdata (abfd)->next_file_pos = bed->s->sizeof_ehdr; return TRUE; } phdrs = bfd_alloc2 (abfd, alloc, sizeof (Elf_Internal_Phdr)); elf_tdata (abfd)->phdr = phdrs; if (phdrs == NULL) return FALSE; maxpagesize = 1; if ((abfd->flags & D_PAGED) != 0) maxpagesize = bed->maxpagesize; off = bed->s->sizeof_ehdr; off += alloc * bed->s->sizeof_phdr; for (m = elf_tdata (abfd)->segment_map, p = phdrs, j = 0; m != NULL; m = m->next, p++, j++) { asection **secpp; bfd_vma off_adjust; bfd_boolean no_contents; /* If elf_segment_map is not from map_sections_to_segments, the sections may not be correctly ordered. NOTE: sorting should not be done to the PT_NOTE section of a corefile, which may contain several pseudo-sections artificially created by bfd. Sorting these pseudo-sections breaks things badly. */ if (m->count > 1 && !(elf_elfheader (abfd)->e_type == ET_CORE && m->p_type == PT_NOTE)) qsort (m->sections, (size_t) m->count, sizeof (asection *), elf_sort_sections); /* An ELF segment (described by Elf_Internal_Phdr) may contain a number of sections with contents contributing to both p_filesz and p_memsz, followed by a number of sections with no contents that just contribute to p_memsz. In this loop, OFF tracks next available file offset for PT_LOAD and PT_NOTE segments. */ p->p_type = m->p_type; p->p_flags = m->p_flags; if (m->count == 0) p->p_vaddr = 0; else p->p_vaddr = m->sections[0]->vma - m->p_vaddr_offset; if (m->p_paddr_valid) p->p_paddr = m->p_paddr; else if (m->count == 0) p->p_paddr = 0; else p->p_paddr = m->sections[0]->lma - m->p_vaddr_offset; if (p->p_type == PT_LOAD && (abfd->flags & D_PAGED) != 0) { /* p_align in demand paged PT_LOAD segments effectively stores the maximum page size. When copying an executable with objcopy, we set m->p_align from the input file. 
Use this value for maxpagesize rather than bed->maxpagesize, which may be different. Note that we use maxpagesize for PT_TLS segment alignment later in this function, so we are relying on at least one PT_LOAD segment appearing before a PT_TLS segment. */ if (m->p_align_valid) maxpagesize = m->p_align; p->p_align = maxpagesize; } else if (m->count == 0) p->p_align = 1 << bed->s->log_file_align; else if (m->p_align_valid) p->p_align = m->p_align; else p->p_align = 0; no_contents = FALSE; off_adjust = 0; if (p->p_type == PT_NOTE) { for (i = 0; i < m->count; i++) elf_section_type (m->sections[i]) = SHT_NOTE; } else if (p->p_type == PT_LOAD && m->count > 0) { bfd_size_type align; unsigned int align_power = 0; if (m->p_align_valid) align = p->p_align; else { for (i = 0, secpp = m->sections; i < m->count; i++, secpp++) { unsigned int secalign; secalign = bfd_get_section_alignment (abfd, *secpp); if (secalign > align_power) align_power = secalign; } align = (bfd_size_type) 1 << align_power; if (align < maxpagesize) align = maxpagesize; } for (i = 0; i < m->count; i++) if ((m->sections[i]->flags & (SEC_LOAD | SEC_HAS_CONTENTS)) == 0) /* If we aren't making room for this section, then it must be SHT_NOBITS regardless of what we've set via struct bfd_elf_special_section. */ elf_section_type (m->sections[i]) = SHT_NOBITS; /* Find out whether this segment contains any loadable sections. If the first section isn't loadable, the same holds for any other sections. */ i = 0; while (elf_section_type (m->sections[i]) == SHT_NOBITS) { /* If a segment starts with .tbss, we need to look at the next section to decide whether the segment has any loadable sections. */ if ((elf_section_flags (m->sections[i]) & SHF_TLS) == 0 || ++i >= m->count) { no_contents = TRUE; break; } } off_adjust = vma_page_aligned_bias (m->sections[0]->vma, off, align); off += off_adjust; if (no_contents) { /* We shouldn't need to align the segment on disk since the segment doesn't need file space, but the gABI arguably requires the alignment and glibc ld.so checks it. So to comply with the alignment requirement but not waste file space, we adjust p_offset for just this segment. (OFF_ADJUST is subtracted from OFF later.) This may put p_offset past the end of file, but that shouldn't matter. */ } else off_adjust = 0; } /* Make sure the .dynamic section is the first section in the PT_DYNAMIC segment. 
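   (The run-time linker finds the _DYNAMIC array through PT_DYNAMIC's
   p_vaddr, so if anything other than .dynamic came first that address
   would no longer point at the dynamic tags.)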
*/ else if (p->p_type == PT_DYNAMIC && m->count > 1 && strcmp (m->sections[0]->name, ".dynamic") != 0) { _bfd_error_handler (_("%B: The first section in the PT_DYNAMIC segment is not the .dynamic section"), abfd); bfd_set_error (bfd_error_bad_value); return FALSE; } p->p_offset = 0; p->p_filesz = 0; p->p_memsz = 0; if (m->includes_filehdr) { if (!m->p_flags_valid) p->p_flags |= PF_R; p->p_filesz = bed->s->sizeof_ehdr; p->p_memsz = bed->s->sizeof_ehdr; if (m->count > 0) { BFD_ASSERT (p->p_type == PT_LOAD); if (p->p_vaddr < (bfd_vma) off) { (*_bfd_error_handler) (_("%B: Not enough room for program headers, try linking with -N"), abfd); bfd_set_error (bfd_error_bad_value); return FALSE; } p->p_vaddr -= off; if (!m->p_paddr_valid) p->p_paddr -= off; } } if (m->includes_phdrs) { if (!m->p_flags_valid) p->p_flags |= PF_R; if (!m->includes_filehdr) { p->p_offset = bed->s->sizeof_ehdr; if (m->count > 0) { BFD_ASSERT (p->p_type == PT_LOAD); p->p_vaddr -= off - p->p_offset; if (!m->p_paddr_valid) p->p_paddr -= off - p->p_offset; } } p->p_filesz += alloc * bed->s->sizeof_phdr; p->p_memsz += alloc * bed->s->sizeof_phdr; } if (p->p_type == PT_LOAD || (p->p_type == PT_NOTE && bfd_get_format (abfd) == bfd_core)) { if (!m->includes_filehdr && !m->includes_phdrs) p->p_offset = off; else { file_ptr adjust; adjust = off - (p->p_offset + p->p_filesz); if (!no_contents) p->p_filesz += adjust; p->p_memsz += adjust; } } /* Set up p_filesz, p_memsz, p_align and p_flags from the section maps. Set filepos for sections in PT_LOAD segments, and in core files, for sections in PT_NOTE segments. assign_file_positions_for_non_load_sections will set filepos for other sections and update p_filesz for other segments. */ for (i = 0, secpp = m->sections; i < m->count; i++, secpp++) { asection *sec; bfd_size_type align; Elf_Internal_Shdr *this_hdr; sec = *secpp; this_hdr = &elf_section_data (sec)->this_hdr; align = (bfd_size_type) 1 << bfd_get_section_alignment (abfd, sec); if (p->p_type == PT_LOAD || p->p_type == PT_TLS) { bfd_signed_vma adjust = sec->lma - (p->p_paddr + p->p_memsz); if (this_hdr->sh_type != SHT_NOBITS || ((this_hdr->sh_flags & SHF_ALLOC) != 0 && ((this_hdr->sh_flags & SHF_TLS) == 0 || p->p_type == PT_TLS))) { if (adjust < 0) { (*_bfd_error_handler) (_("%B: section %A lma 0x%lx overlaps previous sections"), abfd, sec, (unsigned long) sec->lma); adjust = 0; } p->p_memsz += adjust; if (this_hdr->sh_type != SHT_NOBITS) { off += adjust; p->p_filesz += adjust; } } } if (p->p_type == PT_NOTE && bfd_get_format (abfd) == bfd_core) { /* The section at i == 0 is the one that actually contains everything. */ if (i == 0) { this_hdr->sh_offset = sec->filepos = off; off += this_hdr->sh_size; p->p_filesz = this_hdr->sh_size; p->p_memsz = 0; p->p_align = 1; } else { /* The rest are fake sections that shouldn't be written. */ sec->filepos = 0; sec->size = 0; sec->flags = 0; continue; } } else { if (p->p_type == PT_LOAD) { this_hdr->sh_offset = sec->filepos = off; if (this_hdr->sh_type != SHT_NOBITS) off += this_hdr->sh_size; } if (this_hdr->sh_type != SHT_NOBITS) { p->p_filesz += this_hdr->sh_size; /* A load section without SHF_ALLOC is something like a note section in a PT_NOTE segment. These take file space but are not loaded into memory. */ if ((this_hdr->sh_flags & SHF_ALLOC) != 0) p->p_memsz += this_hdr->sh_size; } else if ((this_hdr->sh_flags & SHF_ALLOC) != 0) { if (p->p_type == PT_TLS) p->p_memsz += this_hdr->sh_size; /* .tbss is special. It doesn't contribute to p_memsz of normal segments. 
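   It only contributes to the PT_TLS template segment handled just above;
   at run time each thread allocates its own zero-filled copy of that
   template, so the bytes never appear in an ordinary PT_LOAD image.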
*/ else if ((this_hdr->sh_flags & SHF_TLS) == 0) p->p_memsz += this_hdr->sh_size; } if (p->p_type == PT_GNU_RELRO) p->p_align = 1; else if (align > p->p_align && !m->p_align_valid && (p->p_type != PT_LOAD || (abfd->flags & D_PAGED) == 0)) p->p_align = align; } if (!m->p_flags_valid) { p->p_flags |= PF_R; if ((this_hdr->sh_flags & SHF_EXECINSTR) != 0) p->p_flags |= PF_X; if ((this_hdr->sh_flags & SHF_WRITE) != 0) p->p_flags |= PF_W; } } off -= off_adjust; /* Check that all sections are in a PT_LOAD segment. Don't check funky gdb generated core files. */ if (p->p_type == PT_LOAD && bfd_get_format (abfd) != bfd_core) for (i = 0, secpp = m->sections; i < m->count; i++, secpp++) { Elf_Internal_Shdr *this_hdr; asection *sec; sec = *secpp; this_hdr = &(elf_section_data(sec)->this_hdr); if (this_hdr->sh_size != 0 && !ELF_IS_SECTION_IN_SEGMENT_FILE (this_hdr, p)) { (*_bfd_error_handler) (_("%B: section `%A' can't be allocated in segment %d"), abfd, sec, j); bfd_set_error (bfd_error_bad_value); return FALSE; } } } elf_tdata (abfd)->next_file_pos = off; return TRUE; } /* Assign file positions for the other sections. */ static bfd_boolean assign_file_positions_for_non_load_sections (bfd *abfd, struct bfd_link_info *link_info) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); Elf_Internal_Shdr **i_shdrpp; Elf_Internal_Shdr **hdrpp; Elf_Internal_Phdr *phdrs; Elf_Internal_Phdr *p; struct elf_segment_map *m; bfd_vma filehdr_vaddr, filehdr_paddr; bfd_vma phdrs_vaddr, phdrs_paddr; file_ptr off; unsigned int num_sec; unsigned int i; unsigned int count; i_shdrpp = elf_elfsections (abfd); num_sec = elf_numsections (abfd); off = elf_tdata (abfd)->next_file_pos; for (i = 1, hdrpp = i_shdrpp + 1; i < num_sec; i++, hdrpp++) { struct elf_obj_tdata *tdata = elf_tdata (abfd); Elf_Internal_Shdr *hdr; hdr = *hdrpp; if (hdr->bfd_section != NULL && (hdr->bfd_section->filepos != 0 || (hdr->sh_type == SHT_NOBITS && hdr->contents == NULL))) BFD_ASSERT (hdr->sh_offset == hdr->bfd_section->filepos); else if ((hdr->sh_flags & SHF_ALLOC) != 0) { if (hdr->sh_size != 0) ((*_bfd_error_handler) (_("%B: warning: allocated section `%s' not in segment"), abfd, (hdr->bfd_section == NULL ? "*unknown*" : hdr->bfd_section->name))); /* We don't need to page align empty sections. */ if ((abfd->flags & D_PAGED) != 0 && hdr->sh_size != 0) off += vma_page_aligned_bias (hdr->sh_addr, off, bed->maxpagesize); else off += vma_page_aligned_bias (hdr->sh_addr, off, hdr->sh_addralign); off = _bfd_elf_assign_file_position_for_section (hdr, off, FALSE); } else if (((hdr->sh_type == SHT_REL || hdr->sh_type == SHT_RELA) && hdr->bfd_section == NULL) || hdr == i_shdrpp[tdata->symtab_section] || hdr == i_shdrpp[tdata->symtab_shndx_section] || hdr == i_shdrpp[tdata->strtab_section]) hdr->sh_offset = -1; else off = _bfd_elf_assign_file_position_for_section (hdr, off, TRUE); if (i == SHN_LORESERVE - 1) { i += SHN_HIRESERVE + 1 - SHN_LORESERVE; hdrpp += SHN_HIRESERVE + 1 - SHN_LORESERVE; } } /* Now that we have set the section file positions, we can set up the file positions for the non PT_LOAD segments. 
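   For a segment whose map lists sections S0..Sn this reduces to the
   following shape (a summary of the loop below, not additional API):

     p_offset = S0->filepos;
     p_filesz = (Sn->filepos - S0->filepos)
                + (Sn's sh_size, unless Sn is SHT_NOBITS);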
*/ count = 0; filehdr_vaddr = 0; filehdr_paddr = 0; phdrs_vaddr = bed->maxpagesize + bed->s->sizeof_ehdr; phdrs_paddr = 0; phdrs = elf_tdata (abfd)->phdr; for (m = elf_tdata (abfd)->segment_map, p = phdrs; m != NULL; m = m->next, p++) { ++count; if (p->p_type != PT_LOAD) continue; if (m->includes_filehdr) { filehdr_vaddr = p->p_vaddr; filehdr_paddr = p->p_paddr; } if (m->includes_phdrs) { phdrs_vaddr = p->p_vaddr; phdrs_paddr = p->p_paddr; if (m->includes_filehdr) { phdrs_vaddr += bed->s->sizeof_ehdr; phdrs_paddr += bed->s->sizeof_ehdr; } } } for (m = elf_tdata (abfd)->segment_map, p = phdrs; m != NULL; m = m->next, p++) { if (m->count != 0) { if (p->p_type != PT_LOAD && (p->p_type != PT_NOTE || bfd_get_format (abfd) != bfd_core)) { Elf_Internal_Shdr *hdr; BFD_ASSERT (!m->includes_filehdr && !m->includes_phdrs); hdr = &elf_section_data (m->sections[m->count - 1])->this_hdr; p->p_filesz = (m->sections[m->count - 1]->filepos - m->sections[0]->filepos); if (hdr->sh_type != SHT_NOBITS) p->p_filesz += hdr->sh_size; p->p_offset = m->sections[0]->filepos; } } else { if (m->includes_filehdr) { p->p_vaddr = filehdr_vaddr; if (! m->p_paddr_valid) p->p_paddr = filehdr_paddr; } else if (m->includes_phdrs) { p->p_vaddr = phdrs_vaddr; if (! m->p_paddr_valid) p->p_paddr = phdrs_paddr; } else if (p->p_type == PT_GNU_RELRO) { Elf_Internal_Phdr *lp; for (lp = phdrs; lp < phdrs + count; ++lp) { if (lp->p_type == PT_LOAD && lp->p_vaddr <= link_info->relro_end && lp->p_vaddr >= link_info->relro_start && (lp->p_vaddr + lp->p_filesz >= link_info->relro_end)) break; } if (lp < phdrs + count && link_info->relro_end > lp->p_vaddr) { p->p_vaddr = lp->p_vaddr; p->p_paddr = lp->p_paddr; p->p_offset = lp->p_offset; p->p_filesz = link_info->relro_end - lp->p_vaddr; p->p_memsz = p->p_filesz; p->p_align = 1; p->p_flags = (lp->p_flags & ~PF_W); } else { memset (p, 0, sizeof *p); p->p_type = PT_NULL; } } } } elf_tdata (abfd)->next_file_pos = off; return TRUE; } /* Work out the file positions of all the sections. This is called by _bfd_elf_compute_section_file_positions. All the section sizes and VMAs must be known before this is called. Reloc sections come in two flavours: Those processed specially as "side-channel" data attached to a section to which they apply, and those that bfd doesn't process as relocations. The latter sort are stored in a normal bfd section by bfd_section_from_shdr. We don't consider the former sort here, unless they form part of the loadable image. Reloc sections not assigned here will be handled later by assign_file_positions_for_relocs. We also don't set the positions of the .symtab and .strtab here. */ static bfd_boolean assign_file_positions_except_relocs (bfd *abfd, struct bfd_link_info *link_info) { struct elf_obj_tdata *tdata = elf_tdata (abfd); Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd); file_ptr off; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if ((abfd->flags & (EXEC_P | DYNAMIC)) == 0 && bfd_get_format (abfd) != bfd_core) { Elf_Internal_Shdr ** const i_shdrpp = elf_elfsections (abfd); unsigned int num_sec = elf_numsections (abfd); Elf_Internal_Shdr **hdrpp; unsigned int i; /* Start after the ELF header. */ off = i_ehdrp->e_ehsize; /* We are not creating an executable, which means that we are not creating a program header, and that the actual order of the sections in the file is unimportant. 
*/ for (i = 1, hdrpp = i_shdrpp + 1; i < num_sec; i++, hdrpp++) { Elf_Internal_Shdr *hdr; hdr = *hdrpp; if (((hdr->sh_type == SHT_REL || hdr->sh_type == SHT_RELA) && hdr->bfd_section == NULL) || i == tdata->symtab_section || i == tdata->symtab_shndx_section || i == tdata->strtab_section) { hdr->sh_offset = -1; } else off = _bfd_elf_assign_file_position_for_section (hdr, off, TRUE); if (i == SHN_LORESERVE - 1) { i += SHN_HIRESERVE + 1 - SHN_LORESERVE; hdrpp += SHN_HIRESERVE + 1 - SHN_LORESERVE; } } } else { unsigned int alloc; /* Assign file positions for the loaded sections based on the assignment of sections to segments. */ if (!assign_file_positions_for_load_sections (abfd, link_info)) return FALSE; /* And for non-load sections. */ if (!assign_file_positions_for_non_load_sections (abfd, link_info)) return FALSE; if (bed->elf_backend_modify_program_headers != NULL) { if (!(*bed->elf_backend_modify_program_headers) (abfd, link_info)) return FALSE; } /* Write out the program headers. */ alloc = tdata->program_header_size / bed->s->sizeof_phdr; if (bfd_seek (abfd, (bfd_signed_vma) bed->s->sizeof_ehdr, SEEK_SET) != 0 || bed->s->write_out_phdrs (abfd, tdata->phdr, alloc) != 0) return FALSE; off = tdata->next_file_pos; } /* Place the section headers. */ off = align_file_position (off, 1 << bed->s->log_file_align); i_ehdrp->e_shoff = off; off += i_ehdrp->e_shnum * i_ehdrp->e_shentsize; tdata->next_file_pos = off; return TRUE; } static bfd_boolean prep_headers (bfd *abfd) { Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form */ Elf_Internal_Phdr *i_phdrp = 0; /* Program header table, internal form */ Elf_Internal_Shdr **i_shdrp; /* Section header table, internal form */ struct elf_strtab_hash *shstrtab; const struct elf_backend_data *bed = get_elf_backend_data (abfd); i_ehdrp = elf_elfheader (abfd); i_shdrp = elf_elfsections (abfd); shstrtab = _bfd_elf_strtab_init (); if (shstrtab == NULL) return FALSE; elf_shstrtab (abfd) = shstrtab; i_ehdrp->e_ident[EI_MAG0] = ELFMAG0; i_ehdrp->e_ident[EI_MAG1] = ELFMAG1; i_ehdrp->e_ident[EI_MAG2] = ELFMAG2; i_ehdrp->e_ident[EI_MAG3] = ELFMAG3; i_ehdrp->e_ident[EI_CLASS] = bed->s->elfclass; i_ehdrp->e_ident[EI_DATA] = bfd_big_endian (abfd) ? ELFDATA2MSB : ELFDATA2LSB; i_ehdrp->e_ident[EI_VERSION] = bed->s->ev_current; i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; if ((abfd->flags & DYNAMIC) != 0) i_ehdrp->e_type = ET_DYN; else if ((abfd->flags & EXEC_P) != 0) i_ehdrp->e_type = ET_EXEC; else if (bfd_get_format (abfd) == bfd_core) i_ehdrp->e_type = ET_CORE; else i_ehdrp->e_type = ET_REL; switch (bfd_get_arch (abfd)) { case bfd_arch_unknown: i_ehdrp->e_machine = EM_NONE; break; /* There used to be a long list of cases here, each one setting e_machine to the same EM_* macro #defined as ELF_MACHINE_CODE in the corresponding bfd definition. To avoid duplication, the switch was removed. Machines that need special handling can generally do it in elf_backend_final_write_processing(), unless they need the information earlier than the final write. Such need can generally be supplied by replacing the tests for e_machine with the conditions used to determine it. */ default: i_ehdrp->e_machine = bed->elf_machine_code; } i_ehdrp->e_version = bed->s->ev_current; i_ehdrp->e_ehsize = bed->s->sizeof_ehdr; /* No program header, for now. */ i_ehdrp->e_phoff = 0; i_ehdrp->e_phentsize = 0; i_ehdrp->e_phnum = 0; /* Each bfd section is a section header entry. 
*/ i_ehdrp->e_entry = bfd_get_start_address (abfd); i_ehdrp->e_shentsize = bed->s->sizeof_shdr; /* If we're building an executable, we'll need a program header table. */ if (abfd->flags & EXEC_P) /* It all happens later. */ ; else { i_ehdrp->e_phentsize = 0; i_phdrp = 0; i_ehdrp->e_phoff = 0; } elf_tdata (abfd)->symtab_hdr.sh_name = (unsigned int) _bfd_elf_strtab_add (shstrtab, ".symtab", FALSE); elf_tdata (abfd)->strtab_hdr.sh_name = (unsigned int) _bfd_elf_strtab_add (shstrtab, ".strtab", FALSE); elf_tdata (abfd)->shstrtab_hdr.sh_name = (unsigned int) _bfd_elf_strtab_add (shstrtab, ".shstrtab", FALSE); if (elf_tdata (abfd)->symtab_hdr.sh_name == (unsigned int) -1 || elf_tdata (abfd)->strtab_hdr.sh_name == (unsigned int) -1 || elf_tdata (abfd)->shstrtab_hdr.sh_name == (unsigned int) -1) return FALSE; return TRUE; } /* Assign file positions for all the reloc sections which are not part of the loadable file image. */ void _bfd_elf_assign_file_positions_for_relocs (bfd *abfd) { file_ptr off; unsigned int i, num_sec; Elf_Internal_Shdr **shdrpp; off = elf_tdata (abfd)->next_file_pos; num_sec = elf_numsections (abfd); for (i = 1, shdrpp = elf_elfsections (abfd) + 1; i < num_sec; i++, shdrpp++) { Elf_Internal_Shdr *shdrp; shdrp = *shdrpp; if ((shdrp->sh_type == SHT_REL || shdrp->sh_type == SHT_RELA) && shdrp->sh_offset == -1) off = _bfd_elf_assign_file_position_for_section (shdrp, off, TRUE); } elf_tdata (abfd)->next_file_pos = off; } bfd_boolean _bfd_elf_write_object_contents (bfd *abfd) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); Elf_Internal_Ehdr *i_ehdrp; Elf_Internal_Shdr **i_shdrp; bfd_boolean failed; unsigned int count, num_sec; if (! abfd->output_has_begun && ! _bfd_elf_compute_section_file_positions (abfd, NULL)) return FALSE; i_shdrp = elf_elfsections (abfd); i_ehdrp = elf_elfheader (abfd); failed = FALSE; bfd_map_over_sections (abfd, bed->s->write_relocs, &failed); if (failed) return FALSE; _bfd_elf_assign_file_positions_for_relocs (abfd); /* After writing the headers, we need to write the sections too... */ num_sec = elf_numsections (abfd); for (count = 1; count < num_sec; count++) { if (bed->elf_backend_section_processing) (*bed->elf_backend_section_processing) (abfd, i_shdrp[count]); if (i_shdrp[count]->contents) { bfd_size_type amt = i_shdrp[count]->sh_size; if (bfd_seek (abfd, i_shdrp[count]->sh_offset, SEEK_SET) != 0 || bfd_bwrite (i_shdrp[count]->contents, amt, abfd) != amt) return FALSE; } if (count == SHN_LORESERVE - 1) count += SHN_HIRESERVE + 1 - SHN_LORESERVE; } /* Write out the section header names. */ if (elf_shstrtab (abfd) != NULL && (bfd_seek (abfd, elf_tdata (abfd)->shstrtab_hdr.sh_offset, SEEK_SET) != 0 || !_bfd_elf_strtab_emit (abfd, elf_shstrtab (abfd)))) return FALSE; if (bed->elf_backend_final_write_processing) (*bed->elf_backend_final_write_processing) (abfd, elf_tdata (abfd)->linker); return bed->s->write_shdrs_and_ehdr (abfd); } bfd_boolean _bfd_elf_write_corefile_contents (bfd *abfd) { /* Hopefully this can be done just like an object file. */ return _bfd_elf_write_object_contents (abfd); } /* Given a section, search the header table to find its ELF section index. 
*/ int _bfd_elf_section_from_bfd_section (bfd *abfd, struct bfd_section *asect) { const struct elf_backend_data *bed; int index; if (elf_section_data (asect) != NULL && elf_section_data (asect)->this_idx != 0) return elf_section_data (asect)->this_idx; if (bfd_is_abs_section (asect)) index = SHN_ABS; else if (bfd_is_com_section (asect)) index = SHN_COMMON; else if (bfd_is_und_section (asect)) index = SHN_UNDEF; else index = -1; bed = get_elf_backend_data (abfd); if (bed->elf_backend_section_from_bfd_section) { int retval = index; if ((*bed->elf_backend_section_from_bfd_section) (abfd, asect, &retval)) return retval; } if (index == -1) bfd_set_error (bfd_error_nonrepresentable_section); return index; } /* Given a BFD symbol, return the index in the ELF symbol table, or -1 on error. */ int _bfd_elf_symbol_from_bfd_symbol (bfd *abfd, asymbol **asym_ptr_ptr) { asymbol *asym_ptr = *asym_ptr_ptr; int idx; flagword flags = asym_ptr->flags; /* When gas creates relocations against local labels, it creates its own symbol for the section, but does put the symbol into the symbol chain, so udata is 0. When the linker is generating relocatable output, this section symbol may be for one of the input sections rather than the output section. */ if (asym_ptr->udata.i == 0 && (flags & BSF_SECTION_SYM) && asym_ptr->section) { asection *sec; int indx; sec = asym_ptr->section; if (sec->owner != abfd && sec->output_section != NULL) sec = sec->output_section; if (sec->owner == abfd && (indx = sec->index) < elf_num_section_syms (abfd) && elf_section_syms (abfd)[indx] != NULL) asym_ptr->udata.i = elf_section_syms (abfd)[indx]->udata.i; } idx = asym_ptr->udata.i; if (idx == 0) { /* This case can occur when using --strip-symbol on a symbol which is used in a relocation entry. */ (*_bfd_error_handler) (_("%B: symbol `%s' required but not present"), abfd, bfd_asymbol_name (asym_ptr)); bfd_set_error (bfd_error_no_symbols); return -1; } #if DEBUG & 4 { fprintf (stderr, "elf_symbol_from_bfd_symbol 0x%.8lx, name = %s, sym num = %d, flags = 0x%.8lx%s\n", (long) asym_ptr, asym_ptr->name, idx, flags, elf_symbol_flags (flags)); fflush (stderr); } #endif return idx; } /* Rewrite program header information. */ static bfd_boolean rewrite_elf_program_header (bfd *ibfd, bfd *obfd) { Elf_Internal_Ehdr *iehdr; struct elf_segment_map *map; struct elf_segment_map *map_first; struct elf_segment_map **pointer_to_map; Elf_Internal_Phdr *segment; asection *section; unsigned int i; unsigned int num_segments; bfd_boolean phdr_included = FALSE; bfd_vma maxpagesize; struct elf_segment_map *phdr_adjust_seg = NULL; unsigned int phdr_adjust_num = 0; const struct elf_backend_data *bed; bed = get_elf_backend_data (ibfd); iehdr = elf_elfheader (ibfd); map_first = NULL; pointer_to_map = &map_first; num_segments = elf_elfheader (ibfd)->e_phnum; maxpagesize = get_elf_backend_data (obfd)->maxpagesize; /* Returns the end address of the segment + 1. */ #define SEGMENT_END(segment, start) \ (start + (segment->p_memsz > segment->p_filesz \ ? segment->p_memsz : segment->p_filesz)) #define SECTION_SIZE(section, segment) \ (((section->flags & (SEC_HAS_CONTENTS | SEC_THREAD_LOCAL)) \ != SEC_THREAD_LOCAL || segment->p_type == PT_TLS) \ ? section->size : 0) /* Returns TRUE if the given section is contained within the given segment. VMA addresses are compared. 
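   For example (illustrative numbers), a section with vma 0x1080 and size
   0x100 is contained by a segment with p_vaddr 0x1000, p_filesz 0x1000
   and p_memsz 0x2000: 0x1080 >= 0x1000, and 0x1080 + 0x100 <= 0x1000 +
   0x2000, SEGMENT_END taking the larger of p_memsz and p_filesz.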
*/ #define IS_CONTAINED_BY_VMA(section, segment) \ (section->vma >= segment->p_vaddr \ && (section->vma + SECTION_SIZE (section, segment) \ <= (SEGMENT_END (segment, segment->p_vaddr)))) /* Returns TRUE if the given section is contained within the given segment. LMA addresses are compared. */ #define IS_CONTAINED_BY_LMA(section, segment, base) \ (section->lma >= base \ && (section->lma + SECTION_SIZE (section, segment) \ <= SEGMENT_END (segment, base))) /* Special case: corefile "NOTE" section containing regs, prpsinfo etc. */ #define IS_COREFILE_NOTE(p, s) \ (p->p_type == PT_NOTE \ && bfd_get_format (ibfd) == bfd_core \ && s->vma == 0 && s->lma == 0 \ && (bfd_vma) s->filepos >= p->p_offset \ && ((bfd_vma) s->filepos + s->size \ <= p->p_offset + p->p_filesz)) /* The complicated case when p_vaddr is 0 is to handle the Solaris linker, which generates a PT_INTERP section with p_vaddr and p_memsz set to 0. */ #define IS_SOLARIS_PT_INTERP(p, s) \ (p->p_vaddr == 0 \ && p->p_paddr == 0 \ && p->p_memsz == 0 \ && p->p_filesz > 0 \ && (s->flags & SEC_HAS_CONTENTS) != 0 \ && s->size > 0 \ && (bfd_vma) s->filepos >= p->p_offset \ && ((bfd_vma) s->filepos + s->size \ <= p->p_offset + p->p_filesz)) /* Decide if the given section should be included in the given segment. A section will be included if: 1. It is within the address space of the segment -- we use the LMA if that is set for the segment and the VMA otherwise, 2. It is an allocated segment, 3. There is an output section associated with it, 4. The section has not already been allocated to a previous segment. 5. PT_GNU_STACK segments do not include any sections. 6. PT_TLS segment includes only SHF_TLS sections. 7. SHF_TLS sections are only in PT_TLS or PT_LOAD segments. 8. PT_DYNAMIC should not contain empty sections at the beginning (with the possible exception of .dynamic). */ #define IS_SECTION_IN_INPUT_SEGMENT(section, segment, bed) \ ((((segment->p_paddr \ ? IS_CONTAINED_BY_LMA (section, segment, segment->p_paddr) \ : IS_CONTAINED_BY_VMA (section, segment)) \ && (section->flags & SEC_ALLOC) != 0) \ || IS_COREFILE_NOTE (segment, section)) \ && segment->p_type != PT_GNU_STACK \ && (segment->p_type != PT_TLS \ || (section->flags & SEC_THREAD_LOCAL)) \ && (segment->p_type == PT_LOAD \ || segment->p_type == PT_TLS \ || (section->flags & SEC_THREAD_LOCAL) == 0) \ && (segment->p_type != PT_DYNAMIC \ || SECTION_SIZE (section, segment) > 0 \ || (segment->p_paddr \ ? segment->p_paddr != section->lma \ : segment->p_vaddr != section->vma) \ || (strcmp (bfd_get_section_name (ibfd, section), ".dynamic") \ == 0)) \ && ! section->segment_mark) /* If the output section of a section in the input segment is NULL, it is removed from the corresponding output segment. */ #define INCLUDE_SECTION_IN_SEGMENT(section, segment, bed) \ (IS_SECTION_IN_INPUT_SEGMENT (section, segment, bed) \ && section->output_section != NULL) /* Returns TRUE iff seg1 starts after the end of seg2. */ #define SEGMENT_AFTER_SEGMENT(seg1, seg2, field) \ (seg1->field >= SEGMENT_END (seg2, seg2->field)) /* Returns TRUE iff seg1 and seg2 overlap. Segments overlap iff both their VMA address ranges and their LMA address ranges overlap. It is possible to have overlapping VMA ranges without overlapping LMA ranges. RedBoot images for example can have both .data and .bss mapped to the same VMA range, but with the .data section mapped to a different LMA. 
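   Concretely (illustrative numbers): two PT_LOAD segments of 0x1000
   bytes at p_vaddr 0x1000 and 0x1800 overlap in VMA terms, but if their
   p_paddr values are 0x10000 and 0x20000 the LMA ranges are disjoint,
   SEGMENT_OVERLAPS is FALSE, and the segments are left unmerged.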
*/ #define SEGMENT_OVERLAPS(seg1, seg2) \ ( !(SEGMENT_AFTER_SEGMENT (seg1, seg2, p_vaddr) \ || SEGMENT_AFTER_SEGMENT (seg2, seg1, p_vaddr)) \ && !(SEGMENT_AFTER_SEGMENT (seg1, seg2, p_paddr) \ || SEGMENT_AFTER_SEGMENT (seg2, seg1, p_paddr))) /* Initialise the segment mark field. */ for (section = ibfd->sections; section != NULL; section = section->next) section->segment_mark = FALSE; /* Scan through the segments specified in the program header of the input BFD. For this first scan we look for overlaps in the loadable segments. These can be created by weird parameters to objcopy. Also, fix some Solaris weirdness. */ for (i = 0, segment = elf_tdata (ibfd)->phdr; i < num_segments; i++, segment++) { unsigned int j; Elf_Internal_Phdr *segment2; if (segment->p_type == PT_INTERP) for (section = ibfd->sections; section; section = section->next) if (IS_SOLARIS_PT_INTERP (segment, section)) { /* Minimal change so that the normal section to segment assignment code will work. */ segment->p_vaddr = section->vma; break; } if (segment->p_type != PT_LOAD) continue; /* Determine if this segment overlaps any previous segments. */ for (j = 0, segment2 = elf_tdata (ibfd)->phdr; j < i; j++, segment2 ++) { bfd_signed_vma extra_length; if (segment2->p_type != PT_LOAD || ! SEGMENT_OVERLAPS (segment, segment2)) continue; /* Merge the two segments together. */ if (segment2->p_vaddr < segment->p_vaddr) { /* Extend SEGMENT2 to include SEGMENT and then delete SEGMENT. */ extra_length = SEGMENT_END (segment, segment->p_vaddr) - SEGMENT_END (segment2, segment2->p_vaddr); if (extra_length > 0) { segment2->p_memsz += extra_length; segment2->p_filesz += extra_length; } segment->p_type = PT_NULL; /* Since we have deleted P we must restart the outer loop. */ i = 0; segment = elf_tdata (ibfd)->phdr; break; } else { /* Extend SEGMENT to include SEGMENT2 and then delete SEGMENT2. */ extra_length = SEGMENT_END (segment2, segment2->p_vaddr) - SEGMENT_END (segment, segment->p_vaddr); if (extra_length > 0) { segment->p_memsz += extra_length; segment->p_filesz += extra_length; } segment2->p_type = PT_NULL; } } } /* The second scan attempts to assign sections to segments. */ for (i = 0, segment = elf_tdata (ibfd)->phdr; i < num_segments; i ++, segment ++) { unsigned int section_count; asection ** sections; asection * output_section; unsigned int isec; bfd_vma matching_lma; bfd_vma suggested_lma; unsigned int j; bfd_size_type amt; asection * first_section; if (segment->p_type == PT_NULL) continue; first_section = NULL; /* Compute how many sections might be placed into this segment. */ for (section = ibfd->sections, section_count = 0; section != NULL; section = section->next) { /* Find the first section in the input segment, which may be removed from the corresponding output segment. */ if (IS_SECTION_IN_INPUT_SEGMENT (section, segment, bed)) { if (first_section == NULL) first_section = section; if (section->output_section != NULL) ++section_count; } } /* Allocate a segment map big enough to contain all of the sections we have selected. */ amt = sizeof (struct elf_segment_map); amt += ((bfd_size_type) section_count - 1) * sizeof (asection *); map = bfd_zalloc (obfd, amt); if (map == NULL) return FALSE; /* Initialise the fields of the segment map. Default to using the physical address of the segment in the input BFD. 
*/ map->next = NULL; map->p_type = segment->p_type; map->p_flags = segment->p_flags; map->p_flags_valid = 1; /* If the first section in the input segment is removed, there is no need to preserve segment physical address in the corresponding output segment. */ if (!first_section || first_section->output_section != NULL) { map->p_paddr = segment->p_paddr; map->p_paddr_valid = 1; } /* Determine if this segment contains the ELF file header and if it contains the program headers themselves. */ map->includes_filehdr = (segment->p_offset == 0 && segment->p_filesz >= iehdr->e_ehsize); map->includes_phdrs = 0; if (! phdr_included || segment->p_type != PT_LOAD) { map->includes_phdrs = (segment->p_offset <= (bfd_vma) iehdr->e_phoff && (segment->p_offset + segment->p_filesz >= ((bfd_vma) iehdr->e_phoff + iehdr->e_phnum * iehdr->e_phentsize))); if (segment->p_type == PT_LOAD && map->includes_phdrs) phdr_included = TRUE; } if (section_count == 0) { /* Special segments, such as the PT_PHDR segment, may contain no sections, but ordinary, loadable segments should contain something. They are allowed by the ELF spec however, so only a warning is produced. */ if (segment->p_type == PT_LOAD) (*_bfd_error_handler) (_("%B: warning: Empty loadable segment detected, is this intentional ?\n"), ibfd); map->count = 0; *pointer_to_map = map; pointer_to_map = &map->next; continue; } /* Now scan the sections in the input BFD again and attempt to add their corresponding output sections to the segment map. The problem here is how to handle an output section which has been moved (ie had its LMA changed). There are four possibilities: 1. None of the sections have been moved. In this case we can continue to use the segment LMA from the input BFD. 2. All of the sections have been moved by the same amount. In this case we can change the segment's LMA to match the LMA of the first section. 3. Some of the sections have been moved, others have not. In this case those sections which have not been moved can be placed in the current segment which will have to have its size, and possibly its LMA changed, and a new segment or segments will have to be created to contain the other sections. 4. The sections have been moved, but not by the same amount. In this case we can change the segment's LMA to match the LMA of the first section and we will have to create a new segment or segments to contain the other sections. In order to save time, we allocate an array to hold the section pointers that we are interested in. As these sections get assigned to a segment, they are removed from this array. */ /* Gcc 2.96 miscompiles this code on mips. Don't do casting here to work around this long long bug. */ sections = bfd_malloc2 (section_count, sizeof (asection *)); if (sections == NULL) return FALSE; /* Step One: Scan for segment vs section LMA conflicts. Also add the sections to the section array allocated above. Also add the sections to the current segment. In the common case, where the sections have not been moved, this means that we have completely filled the segment, and there is nothing more to do. */ isec = 0; matching_lma = 0; suggested_lma = 0; for (j = 0, section = ibfd->sections; section != NULL; section = section->next) { if (INCLUDE_SECTION_IN_SEGMENT (section, segment, bed)) { output_section = section->output_section; sections[j ++] = section; /* The Solaris native linker always sets p_paddr to 0. We try to catch that case here, and set it to the correct value. Note - some backends require that p_paddr be left as zero. 
*/ if (segment->p_paddr == 0 && segment->p_vaddr != 0 && (! bed->want_p_paddr_set_to_zero) && isec == 0 && output_section->lma != 0 && (output_section->vma == (segment->p_vaddr + (map->includes_filehdr ? iehdr->e_ehsize : 0) + (map->includes_phdrs ? (iehdr->e_phnum * iehdr->e_phentsize) : 0)))) map->p_paddr = segment->p_vaddr; /* Match up the physical address of the segment with the LMA address of the output section. */ if (IS_CONTAINED_BY_LMA (output_section, segment, map->p_paddr) || IS_COREFILE_NOTE (segment, section) || (bed->want_p_paddr_set_to_zero && IS_CONTAINED_BY_VMA (output_section, segment))) { if (matching_lma == 0) matching_lma = output_section->lma; /* We assume that if the section fits within the segment then it does not overlap any other section within that segment. */ map->sections[isec ++] = output_section; } else if (suggested_lma == 0) suggested_lma = output_section->lma; } } BFD_ASSERT (j == section_count); /* Step Two: Adjust the physical address of the current segment, if necessary. */ if (isec == section_count) { /* All of the sections fitted within the segment as currently specified. This is the default case. Add the segment to the list of built segments and carry on to process the next program header in the input BFD. */ map->count = section_count; *pointer_to_map = map; pointer_to_map = &map->next; if (matching_lma != map->p_paddr && !map->includes_filehdr && !map->includes_phdrs) /* There is some padding before the first section in the segment. So, we must account for that in the output segment's vma. */ map->p_vaddr_offset = matching_lma - map->p_paddr; free (sections); continue; } else { if (matching_lma != 0) { /* At least one section fits inside the current segment. Keep it, but modify its physical address to match the LMA of the first section that fitted. */ map->p_paddr = matching_lma; } else { /* None of the sections fitted inside the current segment. Change the current segment's physical address to match the LMA of the first section. */ map->p_paddr = suggested_lma; } /* Offset the segment physical address from the lma to allow for space taken up by elf headers. */ if (map->includes_filehdr) map->p_paddr -= iehdr->e_ehsize; if (map->includes_phdrs) { map->p_paddr -= iehdr->e_phnum * iehdr->e_phentsize; /* iehdr->e_phnum is just an estimate of the number of program headers that we will need. Make a note here of the number we used and the segment we chose to hold these headers, so that we can adjust the offset when we know the correct value. */ phdr_adjust_num = iehdr->e_phnum; phdr_adjust_seg = map; } } /* Step Three: Loop over the sections again, this time assigning those that fit to the current segment and removing them from the sections array; but making sure not to leave large gaps. Once all possible sections have been assigned to the current segment it is added to the list of built segments and if sections still remain to be assigned, a new segment is constructed before repeating the loop. */ isec = 0; do { map->count = 0; suggested_lma = 0; /* Fill the current segment with sections that fit. */ for (j = 0; j < section_count; j++) { section = sections[j]; if (section == NULL) continue; output_section = section->output_section; BFD_ASSERT (output_section != NULL); if (IS_CONTAINED_BY_LMA (output_section, segment, map->p_paddr) || IS_COREFILE_NOTE (segment, section)) { if (map->count == 0) { /* If the first section in a segment does not start at the beginning of the segment, then something is wrong. 
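   "Beginning of the segment" accounts for any headers mapped into it;
   with a 52-byte ELFCLASS32 file header and, say, five 32-byte program
   headers included, the first section's LMA must equal p_paddr + 52 +
   5 * 32 (illustrative sizes), which is exactly what the check below
   computes.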
*/ if (output_section->lma != (map->p_paddr + (map->includes_filehdr ? iehdr->e_ehsize : 0) + (map->includes_phdrs ? iehdr->e_phnum * iehdr->e_phentsize : 0))) abort (); } else { asection * prev_sec; prev_sec = map->sections[map->count - 1]; /* If the gap between the end of the previous section and the start of this section is more than maxpagesize then we need to start a new segment. */ if ((BFD_ALIGN (prev_sec->lma + prev_sec->size, maxpagesize) < BFD_ALIGN (output_section->lma, maxpagesize)) || ((prev_sec->lma + prev_sec->size) > output_section->lma)) { if (suggested_lma == 0) suggested_lma = output_section->lma; continue; } } map->sections[map->count++] = output_section; ++isec; sections[j] = NULL; section->segment_mark = TRUE; } else if (suggested_lma == 0) suggested_lma = output_section->lma; } BFD_ASSERT (map->count > 0); /* Add the current segment to the list of built segments. */ *pointer_to_map = map; pointer_to_map = &map->next; if (isec < section_count) { /* We still have not allocated all of the sections to segments. Create a new segment here, initialise it and carry on looping. */ amt = sizeof (struct elf_segment_map); amt += ((bfd_size_type) section_count - 1) * sizeof (asection *); map = bfd_alloc (obfd, amt); if (map == NULL) { free (sections); return FALSE; } /* Initialise the fields of the segment map. Set the physical address to the LMA of the first section that has not yet been assigned. */ map->next = NULL; map->p_type = segment->p_type; map->p_flags = segment->p_flags; map->p_flags_valid = 1; map->p_paddr = suggested_lma; map->p_paddr_valid = 1; map->includes_filehdr = 0; map->includes_phdrs = 0; } } while (isec < section_count); free (sections); } /* The Solaris linker creates program headers in which all the p_paddr fields are zero. When we try to objcopy or strip such a file, we get confused. Check for this case, and if we find it reset the p_paddr_valid fields. */ for (map = map_first; map != NULL; map = map->next) if (map->p_paddr != 0) break; if (map == NULL) for (map = map_first; map != NULL; map = map->next) map->p_paddr_valid = 0; elf_tdata (obfd)->segment_map = map_first; /* If we had to estimate the number of program headers that were going to be needed, then check our estimate now and adjust the offset if necessary. */ if (phdr_adjust_seg != NULL) { unsigned int count; for (count = 0, map = map_first; map != NULL; map = map->next) count++; if (count > phdr_adjust_num) phdr_adjust_seg->p_paddr -= (count - phdr_adjust_num) * iehdr->e_phentsize; } #undef SEGMENT_END #undef SECTION_SIZE #undef IS_CONTAINED_BY_VMA #undef IS_CONTAINED_BY_LMA #undef IS_COREFILE_NOTE #undef IS_SOLARIS_PT_INTERP #undef IS_SECTION_IN_INPUT_SEGMENT #undef INCLUDE_SECTION_IN_SEGMENT #undef SEGMENT_AFTER_SEGMENT #undef SEGMENT_OVERLAPS return TRUE; } /* Copy ELF program header information. */ static bfd_boolean copy_elf_program_header (bfd *ibfd, bfd *obfd) { Elf_Internal_Ehdr *iehdr; struct elf_segment_map *map; struct elf_segment_map *map_first; struct elf_segment_map **pointer_to_map; Elf_Internal_Phdr *segment; unsigned int i; unsigned int num_segments; bfd_boolean phdr_included = FALSE; iehdr = elf_elfheader (ibfd); map_first = NULL; pointer_to_map = &map_first; num_segments = elf_elfheader (ibfd)->e_phnum; for (i = 0, segment = elf_tdata (ibfd)->phdr; i < num_segments; i++, segment++) { asection *section; unsigned int section_count; bfd_size_type amt; Elf_Internal_Shdr *this_hdr; asection *first_section = NULL; /* FIXME: Do we need to copy a PT_NULL segment? 
*/ if (segment->p_type == PT_NULL) continue; /* Compute how many sections are in this segment. */ for (section = ibfd->sections, section_count = 0; section != NULL; section = section->next) { this_hdr = &(elf_section_data(section)->this_hdr); if (ELF_IS_SECTION_IN_SEGMENT_FILE (this_hdr, segment)) { if (!first_section) first_section = section; section_count++; } } /* Allocate a segment map big enough to contain all of the sections we have selected. */ amt = sizeof (struct elf_segment_map); if (section_count != 0) amt += ((bfd_size_type) section_count - 1) * sizeof (asection *); map = bfd_zalloc (obfd, amt); if (map == NULL) return FALSE; /* Initialize the fields of the output segment map with the input segment. */ map->next = NULL; map->p_type = segment->p_type; map->p_flags = segment->p_flags; map->p_flags_valid = 1; map->p_paddr = segment->p_paddr; map->p_paddr_valid = 1; map->p_align = segment->p_align; map->p_align_valid = 1; map->p_vaddr_offset = 0; /* Determine if this segment contains the ELF file header and if it contains the program headers themselves. */ map->includes_filehdr = (segment->p_offset == 0 && segment->p_filesz >= iehdr->e_ehsize); map->includes_phdrs = 0; if (! phdr_included || segment->p_type != PT_LOAD) { map->includes_phdrs = (segment->p_offset <= (bfd_vma) iehdr->e_phoff && (segment->p_offset + segment->p_filesz >= ((bfd_vma) iehdr->e_phoff + iehdr->e_phnum * iehdr->e_phentsize))); if (segment->p_type == PT_LOAD && map->includes_phdrs) phdr_included = TRUE; } if (!map->includes_phdrs && !map->includes_filehdr) /* There is some other padding before the first section. */ map->p_vaddr_offset = ((first_section ? first_section->lma : 0) - segment->p_paddr); if (section_count != 0) { unsigned int isec = 0; for (section = first_section; section != NULL; section = section->next) { this_hdr = &(elf_section_data(section)->this_hdr); if (ELF_IS_SECTION_IN_SEGMENT_FILE (this_hdr, segment)) { map->sections[isec++] = section->output_section; if (isec == section_count) break; } } } map->count = section_count; *pointer_to_map = map; pointer_to_map = &map->next; } elf_tdata (obfd)->segment_map = map_first; return TRUE; } /* Copy private BFD data. This copies or rewrites ELF program header information. */ static bfd_boolean copy_private_bfd_data (bfd *ibfd, bfd *obfd) { if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour || bfd_get_flavour (obfd) != bfd_target_elf_flavour) return TRUE; if (elf_tdata (ibfd)->phdr == NULL) return TRUE; if (ibfd->xvec == obfd->xvec) { /* Check to see if any sections in the input BFD covered by an ELF program header have changed. */ Elf_Internal_Phdr *segment; asection *section, *osec; unsigned int i, num_segments; Elf_Internal_Shdr *this_hdr; /* Initialize the segment mark field. */ for (section = obfd->sections; section != NULL; section = section->next) section->segment_mark = FALSE; num_segments = elf_elfheader (ibfd)->e_phnum; for (i = 0, segment = elf_tdata (ibfd)->phdr; i < num_segments; i++, segment++) { /* PR binutils/3535. The Solaris linker always sets the p_paddr and p_memsz fields of special segments (DYNAMIC, INTERP) to 0 which severely confuses things, so always regenerate the segment map in this case. */ if (segment->p_paddr == 0 && segment->p_memsz == 0 && (segment->p_type == PT_INTERP || segment->p_type == PT_DYNAMIC)) goto rewrite; for (section = ibfd->sections; section != NULL; section = section->next) { /* We mark the output section so that we know it comes from the input BFD. 
*/ osec = section->output_section; if (osec) osec->segment_mark = TRUE; /* Check if this section is covered by the segment. */ this_hdr = &(elf_section_data(section)->this_hdr); if (ELF_IS_SECTION_IN_SEGMENT_FILE (this_hdr, segment)) { /* FIXME: Check if its output section is changed or removed. What else do we need to check? */ if (osec == NULL || section->flags != osec->flags || section->lma != osec->lma || section->vma != osec->vma || section->size != osec->size || section->rawsize != osec->rawsize || section->alignment_power != osec->alignment_power) goto rewrite; } } } /* Check to see if any output sections do not come from the input BFD. */ for (section = obfd->sections; section != NULL; section = section->next) { if (section->segment_mark == FALSE) goto rewrite; else section->segment_mark = FALSE; } return copy_elf_program_header (ibfd, obfd); } rewrite: return rewrite_elf_program_header (ibfd, obfd); } /* Initialize private output section information from input section. */ bfd_boolean _bfd_elf_init_private_section_data (bfd *ibfd, asection *isec, bfd *obfd, asection *osec, struct bfd_link_info *link_info) { Elf_Internal_Shdr *ihdr, *ohdr; bfd_boolean need_group = link_info == NULL || link_info->relocatable; if (ibfd->xvec->flavour != bfd_target_elf_flavour || obfd->xvec->flavour != bfd_target_elf_flavour) return TRUE; /* Don't copy the output ELF section type from input if the output BFD section flags have been set to something different. elf_fake_sections will set ELF section type based on BFD section flags. */ if (elf_section_type (osec) == SHT_NULL && (osec->flags == isec->flags || !osec->flags)) elf_section_type (osec) = elf_section_type (isec); /* FIXME: Is this correct for all OS/PROC specific flags? */ elf_section_flags (osec) |= (elf_section_flags (isec) & (SHF_MASKOS | SHF_MASKPROC)); /* Set things up for objcopy and relocatable link. The output SHT_GROUP section will have its elf_next_in_group pointing back to the input group members. Ignore linker-created group sections. See elfNN_ia64_object_p in elfxx-ia64.c. */ if (need_group) { if (elf_sec_group (isec) == NULL || (elf_sec_group (isec)->flags & SEC_LINKER_CREATED) == 0) { if (elf_section_flags (isec) & SHF_GROUP) elf_section_flags (osec) |= SHF_GROUP; elf_next_in_group (osec) = elf_next_in_group (isec); elf_group_name (osec) = elf_group_name (isec); } } ihdr = &elf_section_data (isec)->this_hdr; /* We need to handle elf_linked_to_section for SHF_LINK_ORDER. We don't use the output section of the linked-to section since it may be NULL at this point. */ if ((ihdr->sh_flags & SHF_LINK_ORDER) != 0) { ohdr = &elf_section_data (osec)->this_hdr; ohdr->sh_flags |= SHF_LINK_ORDER; elf_linked_to_section (osec) = elf_linked_to_section (isec); } osec->use_rela_p = isec->use_rela_p; return TRUE; } /* Copy private section information. This copies over the entsize field, and sometimes the info field. 
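   (sh_info must travel with these sections because its meaning is
   type-specific: for SHT_SYMTAB and SHT_DYNSYM it is the index of the
   first non-local symbol, while for SHT_GNU_verdef and SHT_GNU_verneed
   it is the number of version definitions or dependencies.)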
*/ bfd_boolean _bfd_elf_copy_private_section_data (bfd *ibfd, asection *isec, bfd *obfd, asection *osec) { Elf_Internal_Shdr *ihdr, *ohdr; if (ibfd->xvec->flavour != bfd_target_elf_flavour || obfd->xvec->flavour != bfd_target_elf_flavour) return TRUE; ihdr = &elf_section_data (isec)->this_hdr; ohdr = &elf_section_data (osec)->this_hdr; ohdr->sh_entsize = ihdr->sh_entsize; if (ihdr->sh_type == SHT_SYMTAB || ihdr->sh_type == SHT_DYNSYM || ihdr->sh_type == SHT_GNU_verneed || ihdr->sh_type == SHT_GNU_verdef) ohdr->sh_info = ihdr->sh_info; return _bfd_elf_init_private_section_data (ibfd, isec, obfd, osec, NULL); } /* Copy private header information. */ bfd_boolean _bfd_elf_copy_private_header_data (bfd *ibfd, bfd *obfd) { asection *isec; if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour || bfd_get_flavour (obfd) != bfd_target_elf_flavour) return TRUE; /* Copy over private BFD data if it has not already been copied. This must be done here, rather than in the copy_private_bfd_data entry point, because the latter is called after the section contents have been set, which means that the program headers have already been worked out. */ if (elf_tdata (obfd)->segment_map == NULL && elf_tdata (ibfd)->phdr != NULL) { if (! copy_private_bfd_data (ibfd, obfd)) return FALSE; } /* _bfd_elf_copy_private_section_data copied over the SHF_GROUP flag but this might be wrong if we deleted the group section. */ for (isec = ibfd->sections; isec != NULL; isec = isec->next) if (elf_section_type (isec) == SHT_GROUP && isec->output_section == NULL) { asection *first = elf_next_in_group (isec); asection *s = first; while (s != NULL) { if (s->output_section != NULL) { elf_section_flags (s->output_section) &= ~SHF_GROUP; elf_group_name (s->output_section) = NULL; } s = elf_next_in_group (s); if (s == first) break; } } return TRUE; } /* Copy private symbol information. If this symbol is in a section which we did not map into a BFD section, try to map the section index correctly. We use special macro definitions for the mapped section indices; these definitions are interpreted by the swap_out_syms function. */ #define MAP_ONESYMTAB (SHN_HIOS + 1) #define MAP_DYNSYMTAB (SHN_HIOS + 2) #define MAP_STRTAB (SHN_HIOS + 3) #define MAP_SHSTRTAB (SHN_HIOS + 4) #define MAP_SYM_SHNDX (SHN_HIOS + 5) bfd_boolean _bfd_elf_copy_private_symbol_data (bfd *ibfd, asymbol *isymarg, bfd *obfd, asymbol *osymarg) { elf_symbol_type *isym, *osym; if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour || bfd_get_flavour (obfd) != bfd_target_elf_flavour) return TRUE; isym = elf_symbol_from (ibfd, isymarg); osym = elf_symbol_from (obfd, osymarg); if (isym != NULL && osym != NULL && bfd_is_abs_section (isym->symbol.section)) { unsigned int shndx; shndx = isym->internal_elf_sym.st_shndx; if (shndx == elf_onesymtab (ibfd)) shndx = MAP_ONESYMTAB; else if (shndx == elf_dynsymtab (ibfd)) shndx = MAP_DYNSYMTAB; else if (shndx == elf_tdata (ibfd)->strtab_section) shndx = MAP_STRTAB; else if (shndx == elf_tdata (ibfd)->shstrtab_section) shndx = MAP_SHSTRTAB; else if (shndx == elf_tdata (ibfd)->symtab_shndx_section) shndx = MAP_SYM_SHNDX; osym->internal_elf_sym.st_shndx = shndx; } return TRUE; } /* Swap out the symbols. 
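*/

/* The loop below packs each symbol's binding and type into st_info via
   ELF_ST_INFO.  A minimal standalone sketch of that packing (an
   illustration only, not part of BFD's API):  */

static unsigned char
example_st_info (unsigned int bind, unsigned int type)
{
  /* Per the gABI, the high four bits of st_info hold the binding and
     the low four bits hold the type.  */
  return (unsigned char) ((bind << 4) + (type & 0xf));
}

/* For instance, a global function symbol (STB_GLOBAL, STT_FUNC) yields
   st_info 0x12.  The real swap-out routine: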
*/ static bfd_boolean swap_out_syms (bfd *abfd, struct bfd_strtab_hash **sttp, int relocatable_p) { const struct elf_backend_data *bed; int symcount; asymbol **syms; struct bfd_strtab_hash *stt; Elf_Internal_Shdr *symtab_hdr; Elf_Internal_Shdr *symtab_shndx_hdr; Elf_Internal_Shdr *symstrtab_hdr; bfd_byte *outbound_syms; bfd_byte *outbound_shndx; int idx; bfd_size_type amt; bfd_boolean name_local_sections; if (!elf_map_symbols (abfd)) return FALSE; /* Dump out the symtabs. */ stt = _bfd_elf_stringtab_init (); if (stt == NULL) return FALSE; bed = get_elf_backend_data (abfd); symcount = bfd_get_symcount (abfd); symtab_hdr = &elf_tdata (abfd)->symtab_hdr; symtab_hdr->sh_type = SHT_SYMTAB; symtab_hdr->sh_entsize = bed->s->sizeof_sym; symtab_hdr->sh_size = symtab_hdr->sh_entsize * (symcount + 1); symtab_hdr->sh_info = elf_num_locals (abfd) + 1; symtab_hdr->sh_addralign = 1 << bed->s->log_file_align; symstrtab_hdr = &elf_tdata (abfd)->strtab_hdr; symstrtab_hdr->sh_type = SHT_STRTAB; outbound_syms = bfd_alloc2 (abfd, 1 + symcount, bed->s->sizeof_sym); if (outbound_syms == NULL) { _bfd_stringtab_free (stt); return FALSE; } symtab_hdr->contents = outbound_syms; outbound_shndx = NULL; symtab_shndx_hdr = &elf_tdata (abfd)->symtab_shndx_hdr; if (symtab_shndx_hdr->sh_name != 0) { amt = (bfd_size_type) (1 + symcount) * sizeof (Elf_External_Sym_Shndx); outbound_shndx = bfd_zalloc2 (abfd, 1 + symcount, sizeof (Elf_External_Sym_Shndx)); if (outbound_shndx == NULL) { _bfd_stringtab_free (stt); return FALSE; } symtab_shndx_hdr->contents = outbound_shndx; symtab_shndx_hdr->sh_type = SHT_SYMTAB_SHNDX; symtab_shndx_hdr->sh_size = amt; symtab_shndx_hdr->sh_addralign = sizeof (Elf_External_Sym_Shndx); symtab_shndx_hdr->sh_entsize = sizeof (Elf_External_Sym_Shndx); } /* Now generate the data (for "contents"). */ { /* Fill in zeroth symbol and swap it out. */ Elf_Internal_Sym sym; sym.st_name = 0; sym.st_value = 0; sym.st_size = 0; sym.st_info = 0; sym.st_other = 0; sym.st_shndx = SHN_UNDEF; bed->s->swap_symbol_out (abfd, &sym, outbound_syms, outbound_shndx); outbound_syms += bed->s->sizeof_sym; if (outbound_shndx != NULL) outbound_shndx += sizeof (Elf_External_Sym_Shndx); } name_local_sections = (bed->elf_backend_name_local_section_symbols && bed->elf_backend_name_local_section_symbols (abfd)); syms = bfd_get_outsymbols (abfd); for (idx = 0; idx < symcount; idx++) { Elf_Internal_Sym sym; bfd_vma value = syms[idx]->value; elf_symbol_type *type_ptr; flagword flags = syms[idx]->flags; int type; if (!name_local_sections && (flags & (BSF_SECTION_SYM | BSF_GLOBAL)) == BSF_SECTION_SYM) { /* Local section symbols have no name. */ sym.st_name = 0; } else { sym.st_name = (unsigned long) _bfd_stringtab_add (stt, syms[idx]->name, TRUE, FALSE); if (sym.st_name == (unsigned long) -1) { _bfd_stringtab_free (stt); return FALSE; } } type_ptr = elf_symbol_from (abfd, syms[idx]); if ((flags & BSF_SECTION_SYM) == 0 && bfd_is_com_section (syms[idx]->section)) { /* ELF common symbols put the alignment into the `value' field, and the size into the `size' field. This is backwards from how BFD handles it, so reverse it here. */ sym.st_size = value; if (type_ptr == NULL || type_ptr->internal_elf_sym.st_value == 0) sym.st_value = value >= 16 ? 
16 : (1 << bfd_log2 (value)); else sym.st_value = type_ptr->internal_elf_sym.st_value; sym.st_shndx = _bfd_elf_section_from_bfd_section (abfd, syms[idx]->section); } else { asection *sec = syms[idx]->section; int shndx; if (sec->output_section) { value += sec->output_offset; sec = sec->output_section; } /* Don't add in the section vma for relocatable output. */ if (! relocatable_p) value += sec->vma; sym.st_value = value; sym.st_size = type_ptr ? type_ptr->internal_elf_sym.st_size : 0; if (bfd_is_abs_section (sec) && type_ptr != NULL && type_ptr->internal_elf_sym.st_shndx != 0) { /* This symbol is in a real ELF section which we did not create as a BFD section. Undo the mapping done by copy_private_symbol_data. */ shndx = type_ptr->internal_elf_sym.st_shndx; switch (shndx) { case MAP_ONESYMTAB: shndx = elf_onesymtab (abfd); break; case MAP_DYNSYMTAB: shndx = elf_dynsymtab (abfd); break; case MAP_STRTAB: shndx = elf_tdata (abfd)->strtab_section; break; case MAP_SHSTRTAB: shndx = elf_tdata (abfd)->shstrtab_section; break; case MAP_SYM_SHNDX: shndx = elf_tdata (abfd)->symtab_shndx_section; break; default: break; } } else { shndx = _bfd_elf_section_from_bfd_section (abfd, sec); if (shndx == -1) { asection *sec2; /* Writing this would be a hell of a lot easier if we had some decent documentation on bfd, and knew what to expect of the library, and what to demand of applications. For example, it appears that `objcopy' might not set the section of a symbol to be a section that is actually in the output file. */ sec2 = bfd_get_section_by_name (abfd, sec->name); if (sec2 == NULL) { _bfd_error_handler (_("\ Unable to find equivalent output section for symbol '%s' from section '%s'"), syms[idx]->name ? syms[idx]->name : "", sec->name); bfd_set_error (bfd_error_invalid_operation); _bfd_stringtab_free (stt); return FALSE; } shndx = _bfd_elf_section_from_bfd_section (abfd, sec2); BFD_ASSERT (shndx != -1); } } sym.st_shndx = shndx; } if ((flags & BSF_THREAD_LOCAL) != 0) type = STT_TLS; else if ((flags & BSF_FUNCTION) != 0) type = STT_FUNC; else if ((flags & BSF_OBJECT) != 0) type = STT_OBJECT; else if ((flags & BSF_RELC) != 0) type = STT_RELC; else if ((flags & BSF_SRELC) != 0) type = STT_SRELC; else type = STT_NOTYPE; if (syms[idx]->section->flags & SEC_THREAD_LOCAL) type = STT_TLS; /* Processor-specific types. */ if (type_ptr != NULL && bed->elf_backend_get_symbol_type) type = ((*bed->elf_backend_get_symbol_type) (&type_ptr->internal_elf_sym, type)); if (flags & BSF_SECTION_SYM) { if (flags & BSF_GLOBAL) sym.st_info = ELF_ST_INFO (STB_GLOBAL, STT_SECTION); else sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_SECTION); } else if (bfd_is_com_section (syms[idx]->section)) sym.st_info = ELF_ST_INFO (STB_GLOBAL, type); else if (bfd_is_und_section (syms[idx]->section)) sym.st_info = ELF_ST_INFO (((flags & BSF_WEAK) ? 
STB_WEAK : STB_GLOBAL), type); else if (flags & BSF_FILE) sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FILE); else { int bind = STB_LOCAL; if (flags & BSF_LOCAL) bind = STB_LOCAL; else if (flags & BSF_WEAK) bind = STB_WEAK; else if (flags & BSF_GLOBAL) bind = STB_GLOBAL; sym.st_info = ELF_ST_INFO (bind, type); } if (type_ptr != NULL) sym.st_other = type_ptr->internal_elf_sym.st_other; else sym.st_other = 0; bed->s->swap_symbol_out (abfd, &sym, outbound_syms, outbound_shndx); outbound_syms += bed->s->sizeof_sym; if (outbound_shndx != NULL) outbound_shndx += sizeof (Elf_External_Sym_Shndx); } *sttp = stt; symstrtab_hdr->sh_size = _bfd_stringtab_size (stt); symstrtab_hdr->sh_type = SHT_STRTAB; symstrtab_hdr->sh_flags = 0; symstrtab_hdr->sh_addr = 0; symstrtab_hdr->sh_entsize = 0; symstrtab_hdr->sh_link = 0; symstrtab_hdr->sh_info = 0; symstrtab_hdr->sh_addralign = 1; return TRUE; } /* Return the number of bytes required to hold the symtab vector. Note that we base it on the count plus 1, since we will null terminate the vector allocated based on this size. However, the ELF symbol table always has a dummy entry as symbol #0, so it ends up even. */ long _bfd_elf_get_symtab_upper_bound (bfd *abfd) { long symcount; long symtab_size; Elf_Internal_Shdr *hdr = &elf_tdata (abfd)->symtab_hdr; symcount = hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym; symtab_size = (symcount + 1) * (sizeof (asymbol *)); if (symcount > 0) symtab_size -= sizeof (asymbol *); return symtab_size; } long _bfd_elf_get_dynamic_symtab_upper_bound (bfd *abfd) { long symcount; long symtab_size; Elf_Internal_Shdr *hdr = &elf_tdata (abfd)->dynsymtab_hdr; if (elf_dynsymtab (abfd) == 0) { bfd_set_error (bfd_error_invalid_operation); return -1; } symcount = hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym; symtab_size = (symcount + 1) * (sizeof (asymbol *)); if (symcount > 0) symtab_size -= sizeof (asymbol *); return symtab_size; } long _bfd_elf_get_reloc_upper_bound (bfd *abfd ATTRIBUTE_UNUSED, sec_ptr asect) { return (asect->reloc_count + 1) * sizeof (arelent *); } /* Canonicalize the relocs. */ long _bfd_elf_canonicalize_reloc (bfd *abfd, sec_ptr section, arelent **relptr, asymbol **symbols) { arelent *tblptr; unsigned int i; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (! bed->s->slurp_reloc_table (abfd, section, symbols, FALSE)) return -1; tblptr = section->relocation; for (i = 0; i < section->reloc_count; i++) *relptr++ = tblptr++; *relptr = NULL; return section->reloc_count; } long _bfd_elf_canonicalize_symtab (bfd *abfd, asymbol **allocation) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); long symcount = bed->s->slurp_symbol_table (abfd, allocation, FALSE); if (symcount >= 0) bfd_get_symcount (abfd) = symcount; return symcount; } long _bfd_elf_canonicalize_dynamic_symtab (bfd *abfd, asymbol **allocation) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); long symcount = bed->s->slurp_symbol_table (abfd, allocation, TRUE); if (symcount >= 0) bfd_get_dynamic_symcount (abfd) = symcount; return symcount; } /* Return the size required for the dynamic reloc entries. Any loadable section that was actually installed in the BFD, and has type SHT_REL or SHT_RELA, and uses the dynamic symbol table, is considered to be a dynamic reloc section. 
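   The bound computed below is one arelent pointer per reloc in every
   such section plus one for the terminating NULL; e.g. a lone .rela.dyn
   with 12 entries (illustrative count) yields (12 + 1) * sizeof
   (arelent *).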
*/ long _bfd_elf_get_dynamic_reloc_upper_bound (bfd *abfd) { long ret; asection *s; if (elf_dynsymtab (abfd) == 0) { bfd_set_error (bfd_error_invalid_operation); return -1; } ret = sizeof (arelent *); for (s = abfd->sections; s != NULL; s = s->next) if ((s->flags & SEC_LOAD) != 0 && elf_section_data (s)->this_hdr.sh_link == elf_dynsymtab (abfd) && (elf_section_data (s)->this_hdr.sh_type == SHT_REL || elf_section_data (s)->this_hdr.sh_type == SHT_RELA)) ret += ((s->size / elf_section_data (s)->this_hdr.sh_entsize) * sizeof (arelent *)); return ret; } /* Canonicalize the dynamic relocation entries. Note that we return the dynamic relocations as a single block, although they are actually associated with particular sections; the interface, which was designed for SunOS style shared libraries, expects that there is only one set of dynamic relocs. Any loadable section that was actually installed in the BFD, and has type SHT_REL or SHT_RELA, and uses the dynamic symbol table, is considered to be a dynamic reloc section. */ long _bfd_elf_canonicalize_dynamic_reloc (bfd *abfd, arelent **storage, asymbol **syms) { bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean); asection *s; long ret; if (elf_dynsymtab (abfd) == 0) { bfd_set_error (bfd_error_invalid_operation); return -1; } slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table; ret = 0; for (s = abfd->sections; s != NULL; s = s->next) { if ((s->flags & SEC_LOAD) != 0 && elf_section_data (s)->this_hdr.sh_link == elf_dynsymtab (abfd) && (elf_section_data (s)->this_hdr.sh_type == SHT_REL || elf_section_data (s)->this_hdr.sh_type == SHT_RELA)) { arelent *p; long count, i; if (! (*slurp_relocs) (abfd, s, syms, TRUE)) return -1; count = s->size / elf_section_data (s)->this_hdr.sh_entsize; p = s->relocation; for (i = 0; i < count; i++) *storage++ = p++; ret += count; } } *storage = NULL; return ret; } /* Read in the version information. 
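*/

/* Once slurped, the internal version records form NULL-terminated
   chains linked by vn_nextref and vna_nextptr.  A minimal consumer
   sketch (an illustration only, not a BFD entry point):  */

static unsigned int
example_count_verneed (Elf_Internal_Verneed *head)
{
  unsigned int n = 0;

  for (; head != NULL; head = head->vn_nextref)
    n++;
  return n;
}

/* The reader itself bounds-checks every on-disk byte-offset link
   (vn_next, vn_aux, vna_next) before following it: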
*/ bfd_boolean _bfd_elf_slurp_version_tables (bfd *abfd, bfd_boolean default_imported_symver) { bfd_byte *contents = NULL; unsigned int freeidx = 0; if (elf_dynverref (abfd) != 0) { Elf_Internal_Shdr *hdr; Elf_External_Verneed *everneed; Elf_Internal_Verneed *iverneed; unsigned int i; bfd_byte *contents_end; hdr = &elf_tdata (abfd)->dynverref_hdr; elf_tdata (abfd)->verref = bfd_zalloc2 (abfd, hdr->sh_info, sizeof (Elf_Internal_Verneed)); if (elf_tdata (abfd)->verref == NULL) goto error_return; elf_tdata (abfd)->cverrefs = hdr->sh_info; contents = bfd_malloc (hdr->sh_size); if (contents == NULL) { error_return_verref: elf_tdata (abfd)->verref = NULL; elf_tdata (abfd)->cverrefs = 0; goto error_return; } if (bfd_seek (abfd, hdr->sh_offset, SEEK_SET) != 0 || bfd_bread (contents, hdr->sh_size, abfd) != hdr->sh_size) goto error_return_verref; if (hdr->sh_info && hdr->sh_size < sizeof (Elf_External_Verneed)) goto error_return_verref; BFD_ASSERT (sizeof (Elf_External_Verneed) == sizeof (Elf_External_Vernaux)); contents_end = contents + hdr->sh_size - sizeof (Elf_External_Verneed); everneed = (Elf_External_Verneed *) contents; iverneed = elf_tdata (abfd)->verref; for (i = 0; i < hdr->sh_info; i++, iverneed++) { Elf_External_Vernaux *evernaux; Elf_Internal_Vernaux *ivernaux; unsigned int j; _bfd_elf_swap_verneed_in (abfd, everneed, iverneed); iverneed->vn_bfd = abfd; iverneed->vn_filename = bfd_elf_string_from_elf_section (abfd, hdr->sh_link, iverneed->vn_file); if (iverneed->vn_filename == NULL) goto error_return_verref; if (iverneed->vn_cnt == 0) iverneed->vn_auxptr = NULL; else { iverneed->vn_auxptr = bfd_alloc2 (abfd, iverneed->vn_cnt, sizeof (Elf_Internal_Vernaux)); if (iverneed->vn_auxptr == NULL) goto error_return_verref; } if (iverneed->vn_aux > (size_t) (contents_end - (bfd_byte *) everneed)) goto error_return_verref; evernaux = ((Elf_External_Vernaux *) ((bfd_byte *) everneed + iverneed->vn_aux)); ivernaux = iverneed->vn_auxptr; for (j = 0; j < iverneed->vn_cnt; j++, ivernaux++) { _bfd_elf_swap_vernaux_in (abfd, evernaux, ivernaux); ivernaux->vna_nodename = bfd_elf_string_from_elf_section (abfd, hdr->sh_link, ivernaux->vna_name); if (ivernaux->vna_nodename == NULL) goto error_return_verref; if (j + 1 < iverneed->vn_cnt) ivernaux->vna_nextptr = ivernaux + 1; else ivernaux->vna_nextptr = NULL; if (ivernaux->vna_next > (size_t) (contents_end - (bfd_byte *) evernaux)) goto error_return_verref; evernaux = ((Elf_External_Vernaux *) ((bfd_byte *) evernaux + ivernaux->vna_next)); if (ivernaux->vna_other > freeidx) freeidx = ivernaux->vna_other; } if (i + 1 < hdr->sh_info) iverneed->vn_nextref = iverneed + 1; else iverneed->vn_nextref = NULL; if (iverneed->vn_next > (size_t) (contents_end - (bfd_byte *) everneed)) goto error_return_verref; everneed = ((Elf_External_Verneed *) ((bfd_byte *) everneed + iverneed->vn_next)); } free (contents); contents = NULL; } if (elf_dynverdef (abfd) != 0) { Elf_Internal_Shdr *hdr; Elf_External_Verdef *everdef; Elf_Internal_Verdef *iverdef; Elf_Internal_Verdef *iverdefarr; Elf_Internal_Verdef iverdefmem; unsigned int i; unsigned int maxidx; bfd_byte *contents_end_def, *contents_end_aux; hdr = &elf_tdata (abfd)->dynverdef_hdr; contents = bfd_malloc (hdr->sh_size); if (contents == NULL) goto error_return; if (bfd_seek (abfd, hdr->sh_offset, SEEK_SET) != 0 || bfd_bread (contents, hdr->sh_size, abfd) != hdr->sh_size) goto error_return; if (hdr->sh_info && hdr->sh_size < sizeof (Elf_External_Verdef)) goto error_return; BFD_ASSERT (sizeof (Elf_External_Verdef) >= sizeof 
(Elf_External_Verdaux)); contents_end_def = contents + hdr->sh_size - sizeof (Elf_External_Verdef); contents_end_aux = contents + hdr->sh_size - sizeof (Elf_External_Verdaux); /* We know the number of entries in the section but not the maximum index. Therefore we have to run through all entries and find the maximum. */ everdef = (Elf_External_Verdef *) contents; maxidx = 0; for (i = 0; i < hdr->sh_info; ++i) { _bfd_elf_swap_verdef_in (abfd, everdef, &iverdefmem); if ((iverdefmem.vd_ndx & ((unsigned) VERSYM_VERSION)) > maxidx) maxidx = iverdefmem.vd_ndx & ((unsigned) VERSYM_VERSION); if (iverdefmem.vd_next > (size_t) (contents_end_def - (bfd_byte *) everdef)) goto error_return; everdef = ((Elf_External_Verdef *) ((bfd_byte *) everdef + iverdefmem.vd_next)); } if (default_imported_symver) { if (freeidx > maxidx) maxidx = ++freeidx; else freeidx = ++maxidx; } elf_tdata (abfd)->verdef = bfd_zalloc2 (abfd, maxidx, sizeof (Elf_Internal_Verdef)); if (elf_tdata (abfd)->verdef == NULL) goto error_return; elf_tdata (abfd)->cverdefs = maxidx; everdef = (Elf_External_Verdef *) contents; iverdefarr = elf_tdata (abfd)->verdef; for (i = 0; i < hdr->sh_info; i++) { Elf_External_Verdaux *everdaux; Elf_Internal_Verdaux *iverdaux; unsigned int j; _bfd_elf_swap_verdef_in (abfd, everdef, &iverdefmem); if ((iverdefmem.vd_ndx & VERSYM_VERSION) == 0) { error_return_verdef: elf_tdata (abfd)->verdef = NULL; elf_tdata (abfd)->cverdefs = 0; goto error_return; } iverdef = &iverdefarr[(iverdefmem.vd_ndx & VERSYM_VERSION) - 1]; memcpy (iverdef, &iverdefmem, sizeof (Elf_Internal_Verdef)); iverdef->vd_bfd = abfd; if (iverdef->vd_cnt == 0) iverdef->vd_auxptr = NULL; else { iverdef->vd_auxptr = bfd_alloc2 (abfd, iverdef->vd_cnt, sizeof (Elf_Internal_Verdaux)); if (iverdef->vd_auxptr == NULL) goto error_return_verdef; } if (iverdef->vd_aux > (size_t) (contents_end_aux - (bfd_byte *) everdef)) goto error_return_verdef; everdaux = ((Elf_External_Verdaux *) ((bfd_byte *) everdef + iverdef->vd_aux)); iverdaux = iverdef->vd_auxptr; for (j = 0; j < iverdef->vd_cnt; j++, iverdaux++) { _bfd_elf_swap_verdaux_in (abfd, everdaux, iverdaux); iverdaux->vda_nodename = bfd_elf_string_from_elf_section (abfd, hdr->sh_link, iverdaux->vda_name); if (iverdaux->vda_nodename == NULL) goto error_return_verdef; if (j + 1 < iverdef->vd_cnt) iverdaux->vda_nextptr = iverdaux + 1; else iverdaux->vda_nextptr = NULL; if (iverdaux->vda_next > (size_t) (contents_end_aux - (bfd_byte *) everdaux)) goto error_return_verdef; everdaux = ((Elf_External_Verdaux *) ((bfd_byte *) everdaux + iverdaux->vda_next)); } if (iverdef->vd_cnt) iverdef->vd_nodename = iverdef->vd_auxptr->vda_nodename; if ((size_t) (iverdef - iverdefarr) + 1 < maxidx) iverdef->vd_nextdef = iverdef + 1; else iverdef->vd_nextdef = NULL; everdef = ((Elf_External_Verdef *) ((bfd_byte *) everdef + iverdef->vd_next)); } free (contents); contents = NULL; } else if (default_imported_symver) { if (freeidx < 3) freeidx = 3; else freeidx++; elf_tdata (abfd)->verdef = bfd_zalloc2 (abfd, freeidx, sizeof (Elf_Internal_Verdef)); if (elf_tdata (abfd)->verdef == NULL) goto error_return; elf_tdata (abfd)->cverdefs = freeidx; } /* Create a default version based on the soname. 
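   For example (hypothetical values), reading a shared library whose
   DT_SONAME is "libfoo.so.1" but which carries no version definitions
   of its own would, when default_imported_symver is set, synthesize a
   single entry roughly like

     vd_ndx      = freeidx
     vd_cnt      = 1
     vd_nodename = "libfoo.so.1"   (from bfd_elf_get_dt_soname)

   so that imported symbols can still be attached to a usable version
   node.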
*/ if (default_imported_symver) { Elf_Internal_Verdef *iverdef; Elf_Internal_Verdaux *iverdaux; iverdef = &elf_tdata (abfd)->verdef[freeidx - 1];; iverdef->vd_version = VER_DEF_CURRENT; iverdef->vd_flags = 0; iverdef->vd_ndx = freeidx; iverdef->vd_cnt = 1; iverdef->vd_bfd = abfd; iverdef->vd_nodename = bfd_elf_get_dt_soname (abfd); if (iverdef->vd_nodename == NULL) goto error_return_verdef; iverdef->vd_nextdef = NULL; iverdef->vd_auxptr = bfd_alloc (abfd, sizeof (Elf_Internal_Verdaux)); if (iverdef->vd_auxptr == NULL) goto error_return_verdef; iverdaux = iverdef->vd_auxptr; iverdaux->vda_nodename = iverdef->vd_nodename; iverdaux->vda_nextptr = NULL; } return TRUE; error_return: if (contents != NULL) free (contents); return FALSE; } asymbol * _bfd_elf_make_empty_symbol (bfd *abfd) { elf_symbol_type *newsym; bfd_size_type amt = sizeof (elf_symbol_type); newsym = bfd_zalloc (abfd, amt); if (!newsym) return NULL; else { newsym->symbol.the_bfd = abfd; return &newsym->symbol; } } void _bfd_elf_get_symbol_info (bfd *abfd ATTRIBUTE_UNUSED, asymbol *symbol, symbol_info *ret) { bfd_symbol_info (symbol, ret); } /* Return whether a symbol name implies a local symbol. Most targets use this function for the is_local_label_name entry point, but some override it. */ bfd_boolean _bfd_elf_is_local_label_name (bfd *abfd ATTRIBUTE_UNUSED, const char *name) { /* Normal local symbols start with ``.L''. */ if (name[0] == '.' && name[1] == 'L') return TRUE; /* At least some SVR4 compilers (e.g., UnixWare 2.1 cc) generate DWARF debugging symbols starting with ``..''. */ if (name[0] == '.' && name[1] == '.') return TRUE; /* gcc will sometimes generate symbols beginning with ``_.L_'' when emitting DWARF debugging output. I suspect this is actually a small bug in gcc (it calls ASM_OUTPUT_LABEL when it should call ASM_GENERATE_INTERNAL_LABEL, and this causes the leading underscore to be emitted on some ELF targets). For ease of use, we treat such symbols as local. */ if (name[0] == '_' && name[1] == '.' && name[2] == 'L' && name[3] == '_') return TRUE; return FALSE; } alent * _bfd_elf_get_lineno (bfd *abfd ATTRIBUTE_UNUSED, asymbol *symbol ATTRIBUTE_UNUSED) { abort (); return NULL; } bfd_boolean _bfd_elf_set_arch_mach (bfd *abfd, enum bfd_architecture arch, unsigned long machine) { /* If this isn't the right architecture for this backend, and this isn't the generic backend, fail. */ if (arch != get_elf_backend_data (abfd)->arch && arch != bfd_arch_unknown && get_elf_backend_data (abfd)->arch != bfd_arch_unknown) return FALSE; return bfd_default_set_arch_mach (abfd, arch, machine); } /* Find the function to a particular section and offset, for error reporting. */ static bfd_boolean elf_find_function (bfd *abfd ATTRIBUTE_UNUSED, asection *section, asymbol **symbols, bfd_vma offset, const char **filename_ptr, const char **functionname_ptr) { const char *filename; asymbol *func, *file; bfd_vma low_func; asymbol **p; /* ??? Given multiple file symbols, it is impossible to reliably choose the right file name for global symbols. File symbols are local symbols, and thus all file symbols must sort before any global symbols. The ELF spec may be interpreted to say that a file symbol must sort before other local symbols, but currently ld -r doesn't do this. So, for ld -r output, it is possible to make a better choice of file name for local symbols by ignoring file symbols appearing after a given local symbol. 
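   For instance (hypothetical names), given ld -r output whose symbol
   table reads

     STT_FILE   a.c
     STT_FUNC   local_a     (STB_LOCAL)
     STT_FILE   b.c
     STT_FUNC   global_b    (STB_GLOBAL)

   the file name for local_a can safely be taken from the preceding
   a.c entry, but for global_b there is no reliable way to tell whether
   a.c or b.c is correct.  The state machine below therefore always
   trusts the most recent file symbol for local symbols, and trusts it
   for globals only as long as no file symbol has appeared after some
   other symbol.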
*/ enum { nothing_seen, symbol_seen, file_after_symbol_seen } state; filename = NULL; func = NULL; file = NULL; low_func = 0; state = nothing_seen; for (p = symbols; *p != NULL; p++) { elf_symbol_type *q; q = (elf_symbol_type *) *p; switch (ELF_ST_TYPE (q->internal_elf_sym.st_info)) { default: break; case STT_FILE: file = &q->symbol; if (state == symbol_seen) state = file_after_symbol_seen; continue; case STT_NOTYPE: case STT_FUNC: if (bfd_get_section (&q->symbol) == section && q->symbol.value >= low_func && q->symbol.value <= offset) { func = (asymbol *) q; low_func = q->symbol.value; filename = NULL; if (file != NULL && (ELF_ST_BIND (q->internal_elf_sym.st_info) == STB_LOCAL || state != file_after_symbol_seen)) filename = bfd_asymbol_name (file); } break; } if (state == nothing_seen) state = symbol_seen; } if (func == NULL) return FALSE; if (filename_ptr) *filename_ptr = filename; if (functionname_ptr) *functionname_ptr = bfd_asymbol_name (func); return TRUE; } /* Find the nearest line to a particular section and offset, for error reporting. */ bfd_boolean _bfd_elf_find_nearest_line (bfd *abfd, asection *section, asymbol **symbols, bfd_vma offset, const char **filename_ptr, const char **functionname_ptr, unsigned int *line_ptr) { bfd_boolean found; if (_bfd_dwarf1_find_nearest_line (abfd, section, symbols, offset, filename_ptr, functionname_ptr, line_ptr)) { if (!*functionname_ptr) elf_find_function (abfd, section, symbols, offset, *filename_ptr ? NULL : filename_ptr, functionname_ptr); return TRUE; } if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset, filename_ptr, functionname_ptr, line_ptr, 0, &elf_tdata (abfd)->dwarf2_find_line_info)) { if (!*functionname_ptr) elf_find_function (abfd, section, symbols, offset, *filename_ptr ? NULL : filename_ptr, functionname_ptr); return TRUE; } if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, &found, filename_ptr, functionname_ptr, line_ptr, &elf_tdata (abfd)->line_info)) return FALSE; if (found && (*functionname_ptr || *line_ptr)) return TRUE; if (symbols == NULL) return FALSE; if (! elf_find_function (abfd, section, symbols, offset, filename_ptr, functionname_ptr)) return FALSE; *line_ptr = 0; return TRUE; } /* Find the line for a symbol. */ bfd_boolean _bfd_elf_find_line (bfd *abfd, asymbol **symbols, asymbol *symbol, const char **filename_ptr, unsigned int *line_ptr) { return _bfd_dwarf2_find_line (abfd, symbols, symbol, filename_ptr, line_ptr, 0, &elf_tdata (abfd)->dwarf2_find_line_info); } /* After a call to bfd_find_nearest_line, successive calls to bfd_find_inliner_info can be used to get source information about each level of function inlining that terminated at the address passed to bfd_find_nearest_line. Currently this is only supported for DWARF2 with appropriate DWARF3 extensions. 
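   A typical caller therefore looks roughly like this (sketch only;
   report is a hypothetical consumer of the returned strings):

     if (bfd_find_nearest_line (abfd, sec, syms, off, &file, &func, &line))
       {
         report (file, func, line);
         while (bfd_find_inliner_info (abfd, &file, &func, &line))
           report (file, func, line);    one extra level per inlined frame
       }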
*/ bfd_boolean _bfd_elf_find_inliner_info (bfd *abfd, const char **filename_ptr, const char **functionname_ptr, unsigned int *line_ptr) { bfd_boolean found; found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr, functionname_ptr, line_ptr, & elf_tdata (abfd)->dwarf2_find_line_info); return found; } int _bfd_elf_sizeof_headers (bfd *abfd, struct bfd_link_info *info) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); int ret = bed->s->sizeof_ehdr; if (!info->relocatable) { bfd_size_type phdr_size = elf_tdata (abfd)->program_header_size; if (phdr_size == (bfd_size_type) -1) { struct elf_segment_map *m; phdr_size = 0; for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next) phdr_size += bed->s->sizeof_phdr; if (phdr_size == 0) phdr_size = get_program_header_size (abfd, info); } elf_tdata (abfd)->program_header_size = phdr_size; ret += phdr_size; } return ret; } bfd_boolean _bfd_elf_set_section_contents (bfd *abfd, sec_ptr section, const void *location, file_ptr offset, bfd_size_type count) { Elf_Internal_Shdr *hdr; bfd_signed_vma pos; if (! abfd->output_has_begun && ! _bfd_elf_compute_section_file_positions (abfd, NULL)) return FALSE; hdr = &elf_section_data (section)->this_hdr; pos = hdr->sh_offset + offset; if (bfd_seek (abfd, pos, SEEK_SET) != 0 || bfd_bwrite (location, count, abfd) != count) return FALSE; return TRUE; } void _bfd_elf_no_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr ATTRIBUTE_UNUSED, Elf_Internal_Rela *dst ATTRIBUTE_UNUSED) { abort (); } /* Try to convert a non-ELF reloc into an ELF one. */ bfd_boolean _bfd_elf_validate_reloc (bfd *abfd, arelent *areloc) { /* Check whether we really have an ELF howto. */ if ((*areloc->sym_ptr_ptr)->the_bfd->xvec != abfd->xvec) { bfd_reloc_code_real_type code; reloc_howto_type *howto; /* Alien reloc: Try to determine its type to replace it with an equivalent ELF reloc. */ if (areloc->howto->pc_relative) { switch (areloc->howto->bitsize) { case 8: code = BFD_RELOC_8_PCREL; break; case 12: code = BFD_RELOC_12_PCREL; break; case 16: code = BFD_RELOC_16_PCREL; break; case 24: code = BFD_RELOC_24_PCREL; break; case 32: code = BFD_RELOC_32_PCREL; break; case 64: code = BFD_RELOC_64_PCREL; break; default: goto fail; } howto = bfd_reloc_type_lookup (abfd, code); if (areloc->howto->pcrel_offset != howto->pcrel_offset) { if (howto->pcrel_offset) areloc->addend += areloc->address; else areloc->addend -= areloc->address; /* addend is unsigned!! */ } } else { switch (areloc->howto->bitsize) { case 8: code = BFD_RELOC_8; break; case 14: code = BFD_RELOC_14; break; case 16: code = BFD_RELOC_16; break; case 26: code = BFD_RELOC_26; break; case 32: code = BFD_RELOC_32; break; case 64: code = BFD_RELOC_64; break; default: goto fail; } howto = bfd_reloc_type_lookup (abfd, code); } if (howto) areloc->howto = howto; else goto fail; } return TRUE; fail: (*_bfd_error_handler) (_("%B: unsupported relocation type %s"), abfd, areloc->howto->name); bfd_set_error (bfd_error_bad_value); return FALSE; } bfd_boolean _bfd_elf_close_and_cleanup (bfd *abfd) { if (bfd_get_format (abfd) == bfd_object) { if (elf_tdata (abfd) != NULL && elf_shstrtab (abfd) != NULL) _bfd_elf_strtab_free (elf_shstrtab (abfd)); _bfd_dwarf2_cleanup_debug_info (abfd); } return _bfd_generic_close_and_cleanup (abfd); } /* For Rel targets, we encode meaningful data for BFD_RELOC_VTABLE_ENTRY in the relocation's offset. Thus we cannot allow any sort of sanity range-checking to interfere. There is nothing else to do in processing this reloc. 
*/ bfd_reloc_status_type _bfd_elf_rel_vtable_reloc_fn (bfd *abfd ATTRIBUTE_UNUSED, arelent *re ATTRIBUTE_UNUSED, struct bfd_symbol *symbol ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED, asection *is ATTRIBUTE_UNUSED, bfd *obfd ATTRIBUTE_UNUSED, char **errmsg ATTRIBUTE_UNUSED) { return bfd_reloc_ok; } /* Elf core file support. Much of this only works on native toolchains, since we rely on knowing the machine-dependent procfs structure in order to pick out details about the corefile. */ #ifdef HAVE_SYS_PROCFS_H # include /* Define HAVE_THRMISC_T for consistency with other similar GNU-type stubs. */ #undef HAVE_THRMISC_T #if defined (THRMISC_VERSION) #define HAVE_THRMISC_T 1 #endif #endif /* FIXME: this is kinda wrong, but it's what gdb wants. */ static int elfcore_make_pid (bfd *abfd) { return ((elf_tdata (abfd)->core_lwpid << 16) + (elf_tdata (abfd)->core_pid)); } /* If there isn't a section called NAME, make one, using data from SECT. Note, this function will generate a reference to NAME, so you shouldn't deallocate or overwrite it. */ static bfd_boolean elfcore_maybe_make_sect (bfd *abfd, char *name, asection *sect) { asection *sect2; if (bfd_get_section_by_name (abfd, name) != NULL) return TRUE; sect2 = bfd_make_section_with_flags (abfd, name, sect->flags); if (sect2 == NULL) return FALSE; sect2->size = sect->size; sect2->filepos = sect->filepos; sect2->alignment_power = sect->alignment_power; return TRUE; } /* Create a pseudosection containing SIZE bytes at FILEPOS. This actually creates up to two pseudosections: - For the single-threaded case, a section named NAME, unless such a section already exists. - For the multi-threaded case, a section named "NAME/PID", where PID is elfcore_make_pid (abfd). Both pseudosections have identical contents. */ bfd_boolean _bfd_elfcore_make_pseudosection (bfd *abfd, char *name, size_t size, ufile_ptr filepos) { char buf[100]; char *threaded_name; size_t len; asection *sect; /* Build the section name. */ sprintf (buf, "%s/%d", name, elfcore_make_pid (abfd)); len = strlen (buf) + 1; threaded_name = bfd_alloc (abfd, len); if (threaded_name == NULL) return FALSE; memcpy (threaded_name, buf, len); sect = bfd_make_section_anyway_with_flags (abfd, threaded_name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; sect->size = size; sect->filepos = filepos; sect->alignment_power = 2; return elfcore_maybe_make_sect (abfd, name, sect); } /* prstatus_t exists on: solaris 2.5+ linux 2.[01] + glibc unixware 4.2 */ #if defined (HAVE_PRSTATUS_T) static bfd_boolean elfcore_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) { size_t size; int offset; if (note->descsz == sizeof (prstatus_t)) { prstatus_t prstat; size = sizeof (prstat.pr_reg); offset = offsetof (prstatus_t, pr_reg); memcpy (&prstat, note->descdata, sizeof (prstat)); /* Do not overwrite the core signal if it has already been set by another thread. */ if (elf_tdata (abfd)->core_signal == 0) elf_tdata (abfd)->core_signal = prstat.pr_cursig; elf_tdata (abfd)->core_pid = prstat.pr_pid; /* pr_who exists on: solaris 2.5+ unixware 4.2 pr_who doesn't exist on: linux 2.[01] */ #if defined (HAVE_PRSTATUS_T_PR_WHO) elf_tdata (abfd)->core_lwpid = prstat.pr_who; #endif } #if defined (HAVE_PRSTATUS32_T) else if (note->descsz == sizeof (prstatus32_t)) { /* 64-bit host, 32-bit corefile */ prstatus32_t prstat; size = sizeof (prstat.pr_reg); offset = offsetof (prstatus32_t, pr_reg); memcpy (&prstat, note->descdata, sizeof (prstat)); /* Do not overwrite the core signal if it has already been set by another thread. 
*/ if (elf_tdata (abfd)->core_signal == 0) elf_tdata (abfd)->core_signal = prstat.pr_cursig; elf_tdata (abfd)->core_pid = prstat.pr_pid; /* pr_who exists on: solaris 2.5+ unixware 4.2 pr_who doesn't exist on: linux 2.[01] */ #if defined (HAVE_PRSTATUS32_T_PR_WHO) elf_tdata (abfd)->core_lwpid = prstat.pr_who; #endif } #endif /* HAVE_PRSTATUS32_T */ else { /* Fail - we don't know how to handle any other note size (ie. data object type). */ return TRUE; } /* Make a ".reg/999" section and a ".reg" section. */ return _bfd_elfcore_make_pseudosection (abfd, ".reg", size, note->descpos + offset); } #endif /* defined (HAVE_PRSTATUS_T) */ /* Create a pseudosection containing the exact contents of NOTE. */ static bfd_boolean elfcore_make_note_pseudosection (bfd *abfd, char *name, Elf_Internal_Note *note) { return _bfd_elfcore_make_pseudosection (abfd, name, note->descsz, note->descpos); } /* There isn't a consistent prfpregset_t across platforms, but it doesn't matter, because we don't have to pick this data structure apart. */ static bfd_boolean elfcore_grok_prfpreg (bfd *abfd, Elf_Internal_Note *note) { return elfcore_make_note_pseudosection (abfd, ".reg2", note); } /* Linux dumps the Intel SSE regs in a note named "LINUX" with a note type of 5 (NT_PRXFPREG). Just include the whole note's contents literally. */ static bfd_boolean elfcore_grok_prxfpreg (bfd *abfd, Elf_Internal_Note *note) { return elfcore_make_note_pseudosection (abfd, ".reg-xfp", note); } #if defined (HAVE_THRMISC_T) static bfd_boolean elfcore_grok_thrmisc (bfd *abfd, Elf_Internal_Note *note) { return elfcore_make_note_pseudosection (abfd, ".tname", note); } #endif /* defined (HAVE_THRMISC_T) */ #if defined (HAVE_PRPSINFO_T) typedef prpsinfo_t elfcore_psinfo_t; #if defined (HAVE_PRPSINFO32_T) /* Sparc64 cross Sparc32 */ typedef prpsinfo32_t elfcore_psinfo32_t; #endif #endif #if defined (HAVE_PSINFO_T) typedef psinfo_t elfcore_psinfo_t; #if defined (HAVE_PSINFO32_T) /* Sparc64 cross Sparc32 */ typedef psinfo32_t elfcore_psinfo32_t; #endif #endif /* return a malloc'ed copy of a string at START which is at most MAX bytes long, possibly without a terminating '\0'. the copy will always have a terminating '\0'. */ char * _bfd_elfcore_strndup (bfd *abfd, char *start, size_t max) { char *dups; char *end = memchr (start, '\0', max); size_t len; if (end == NULL) len = max; else len = end - start; dups = bfd_alloc (abfd, len + 1); if (dups == NULL) return NULL; memcpy (dups, start, len); dups[len] = '\0'; return dups; } #if defined (HAVE_PRPSINFO_T) || defined (HAVE_PSINFO_T) static bfd_boolean elfcore_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) { if (note->descsz == sizeof (elfcore_psinfo_t)) { elfcore_psinfo_t psinfo; memcpy (&psinfo, note->descdata, sizeof (psinfo)); elf_tdata (abfd)->core_program = _bfd_elfcore_strndup (abfd, psinfo.pr_fname, sizeof (psinfo.pr_fname)); elf_tdata (abfd)->core_command = _bfd_elfcore_strndup (abfd, psinfo.pr_psargs, sizeof (psinfo.pr_psargs)); } #if defined (HAVE_PRPSINFO32_T) || defined (HAVE_PSINFO32_T) else if (note->descsz == sizeof (elfcore_psinfo32_t)) { /* 64-bit host, 32-bit corefile */ elfcore_psinfo32_t psinfo; memcpy (&psinfo, note->descdata, sizeof (psinfo)); elf_tdata (abfd)->core_program = _bfd_elfcore_strndup (abfd, psinfo.pr_fname, sizeof (psinfo.pr_fname)); elf_tdata (abfd)->core_command = _bfd_elfcore_strndup (abfd, psinfo.pr_psargs, sizeof (psinfo.pr_psargs)); } #endif else { /* Fail - we don't know how to handle any other note size (ie. data object type). 
*/ return TRUE; } /* Note that for some reason, a spurious space is tacked onto the end of the args in some (at least one anyway) implementations, so strip it off if it exists. */ { char *command = elf_tdata (abfd)->core_command; int n = strlen (command); if (0 < n && command[n - 1] == ' ') command[n - 1] = '\0'; } return TRUE; } #endif /* defined (HAVE_PRPSINFO_T) || defined (HAVE_PSINFO_T) */ #if defined (HAVE_PSTATUS_T) static bfd_boolean elfcore_grok_pstatus (bfd *abfd, Elf_Internal_Note *note) { if (note->descsz == sizeof (pstatus_t) #if defined (HAVE_PXSTATUS_T) || note->descsz == sizeof (pxstatus_t) #endif ) { pstatus_t pstat; memcpy (&pstat, note->descdata, sizeof (pstat)); elf_tdata (abfd)->core_pid = pstat.pr_pid; } #if defined (HAVE_PSTATUS32_T) else if (note->descsz == sizeof (pstatus32_t)) { /* 64-bit host, 32-bit corefile */ pstatus32_t pstat; memcpy (&pstat, note->descdata, sizeof (pstat)); elf_tdata (abfd)->core_pid = pstat.pr_pid; } #endif /* Could grab some more details from the "representative" lwpstatus_t in pstat.pr_lwp, but we'll catch it all in an NT_LWPSTATUS note, presumably. */ return TRUE; } #endif /* defined (HAVE_PSTATUS_T) */ #if defined (HAVE_LWPSTATUS_T) static bfd_boolean elfcore_grok_lwpstatus (bfd *abfd, Elf_Internal_Note *note) { lwpstatus_t lwpstat; char buf[100]; char *name; size_t len; asection *sect; if (note->descsz != sizeof (lwpstat) #if defined (HAVE_LWPXSTATUS_T) && note->descsz != sizeof (lwpxstatus_t) #endif ) return TRUE; memcpy (&lwpstat, note->descdata, sizeof (lwpstat)); elf_tdata (abfd)->core_lwpid = lwpstat.pr_lwpid; elf_tdata (abfd)->core_signal = lwpstat.pr_cursig; /* Make a ".reg/999" section. */ sprintf (buf, ".reg/%d", elfcore_make_pid (abfd)); len = strlen (buf) + 1; name = bfd_alloc (abfd, len); if (name == NULL) return FALSE; memcpy (name, buf, len); sect = bfd_make_section_anyway_with_flags (abfd, name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; #if defined (HAVE_LWPSTATUS_T_PR_CONTEXT) sect->size = sizeof (lwpstat.pr_context.uc_mcontext.gregs); sect->filepos = note->descpos + offsetof (lwpstatus_t, pr_context.uc_mcontext.gregs); #endif #if defined (HAVE_LWPSTATUS_T_PR_REG) sect->size = sizeof (lwpstat.pr_reg); sect->filepos = note->descpos + offsetof (lwpstatus_t, pr_reg); #endif sect->alignment_power = 2; if (!elfcore_maybe_make_sect (abfd, ".reg", sect)) return FALSE; /* Make a ".reg2/999" section */ sprintf (buf, ".reg2/%d", elfcore_make_pid (abfd)); len = strlen (buf) + 1; name = bfd_alloc (abfd, len); if (name == NULL) return FALSE; memcpy (name, buf, len); sect = bfd_make_section_anyway_with_flags (abfd, name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; #if defined (HAVE_LWPSTATUS_T_PR_CONTEXT) sect->size = sizeof (lwpstat.pr_context.uc_mcontext.fpregs); sect->filepos = note->descpos + offsetof (lwpstatus_t, pr_context.uc_mcontext.fpregs); #endif #if defined (HAVE_LWPSTATUS_T_PR_FPREG) sect->size = sizeof (lwpstat.pr_fpreg); sect->filepos = note->descpos + offsetof (lwpstatus_t, pr_fpreg); #endif sect->alignment_power = 2; return elfcore_maybe_make_sect (abfd, ".reg2", sect); } #endif /* defined (HAVE_LWPSTATUS_T) */ #if defined (HAVE_WIN32_PSTATUS_T) static bfd_boolean elfcore_grok_win32pstatus (bfd *abfd, Elf_Internal_Note *note) { char buf[30]; char *name; size_t len; asection *sect; win32_pstatus_t pstatus; if (note->descsz < sizeof (pstatus)) return TRUE; memcpy (&pstatus, note->descdata, sizeof (pstatus)); switch (pstatus.data_type) { case NOTE_INFO_PROCESS: /* FIXME: need to add ->core_command. 
*/ elf_tdata (abfd)->core_signal = pstatus.data.process_info.signal; elf_tdata (abfd)->core_pid = pstatus.data.process_info.pid; break; case NOTE_INFO_THREAD: /* Make a ".reg/999" section. */ sprintf (buf, ".reg/%ld", (long) pstatus.data.thread_info.tid); len = strlen (buf) + 1; name = bfd_alloc (abfd, len); if (name == NULL) return FALSE; memcpy (name, buf, len); sect = bfd_make_section_anyway_with_flags (abfd, name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; sect->size = sizeof (pstatus.data.thread_info.thread_context); sect->filepos = (note->descpos + offsetof (struct win32_pstatus, data.thread_info.thread_context)); sect->alignment_power = 2; if (pstatus.data.thread_info.is_active_thread) if (! elfcore_maybe_make_sect (abfd, ".reg", sect)) return FALSE; break; case NOTE_INFO_MODULE: /* Make a ".module/xxxxxxxx" section. */ sprintf (buf, ".module/%08lx", (long) pstatus.data.module_info.base_address); len = strlen (buf) + 1; name = bfd_alloc (abfd, len); if (name == NULL) return FALSE; memcpy (name, buf, len); sect = bfd_make_section_anyway_with_flags (abfd, name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; sect->size = note->descsz; sect->filepos = note->descpos; sect->alignment_power = 2; break; default: return TRUE; } return TRUE; } #endif /* HAVE_WIN32_PSTATUS_T */ static bfd_boolean elfcore_grok_note (bfd *abfd, Elf_Internal_Note *note) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); switch (note->type) { default: return TRUE; case NT_PRSTATUS: if (bed->elf_backend_grok_prstatus) if ((*bed->elf_backend_grok_prstatus) (abfd, note)) return TRUE; #if defined (HAVE_PRSTATUS_T) return elfcore_grok_prstatus (abfd, note); #else return TRUE; #endif #if defined (HAVE_PSTATUS_T) case NT_PSTATUS: return elfcore_grok_pstatus (abfd, note); #endif #if defined (HAVE_LWPSTATUS_T) case NT_LWPSTATUS: return elfcore_grok_lwpstatus (abfd, note); #endif case NT_FPREGSET: /* FIXME: rename to NT_PRFPREG */ return elfcore_grok_prfpreg (abfd, note); #if defined (HAVE_WIN32_PSTATUS_T) case NT_WIN32PSTATUS: return elfcore_grok_win32pstatus (abfd, note); #endif case NT_PRXFPREG: /* Linux SSE extension */ if (note->namesz == 6 && strcmp (note->namedata, "LINUX") == 0) return elfcore_grok_prxfpreg (abfd, note); else return TRUE; case NT_PRPSINFO: case NT_PSINFO: if (bed->elf_backend_grok_psinfo) if ((*bed->elf_backend_grok_psinfo) (abfd, note)) return TRUE; #if defined (HAVE_PRPSINFO_T) || defined (HAVE_PSINFO_T) return elfcore_grok_psinfo (abfd, note); #else return TRUE; #endif case NT_AUXV: { asection *sect = bfd_make_section_anyway_with_flags (abfd, ".auxv", SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; sect->size = note->descsz; sect->filepos = note->descpos; sect->alignment_power = 1 + bfd_get_arch_size (abfd) / 32; return TRUE; } #if defined (HAVE_THRMISC_T) case NT_THRMISC: return elfcore_grok_thrmisc (abfd, note); #endif } } static bfd_boolean elfcore_netbsd_get_lwpid (Elf_Internal_Note *note, int *lwpidp) { char *cp; cp = strchr (note->namedata, '@'); if (cp != NULL) { *lwpidp = atoi(cp + 1); return TRUE; } return FALSE; } static bfd_boolean elfcore_grok_netbsd_procinfo (bfd *abfd, Elf_Internal_Note *note) { /* Signal number at offset 0x08. */ elf_tdata (abfd)->core_signal = bfd_h_get_32 (abfd, (bfd_byte *) note->descdata + 0x08); /* Process ID at offset 0x50. */ elf_tdata (abfd)->core_pid = bfd_h_get_32 (abfd, (bfd_byte *) note->descdata + 0x50); /* Command name at 0x7c (max 32 bytes, including nul). 
*/ elf_tdata (abfd)->core_command = _bfd_elfcore_strndup (abfd, note->descdata + 0x7c, 31); return elfcore_make_note_pseudosection (abfd, ".note.netbsdcore.procinfo", note); } static bfd_boolean elfcore_grok_netbsd_note (bfd *abfd, Elf_Internal_Note *note) { int lwp; if (elfcore_netbsd_get_lwpid (note, &lwp)) elf_tdata (abfd)->core_lwpid = lwp; if (note->type == NT_NETBSDCORE_PROCINFO) { /* NetBSD-specific core "procinfo". Note that we expect to find this note before any of the others, which is fine, since the kernel writes this note out first when it creates a core file. */ return elfcore_grok_netbsd_procinfo (abfd, note); } /* As of Jan 2002 there are no other machine-independent notes defined for NetBSD core files. If the note type is less than the start of the machine-dependent note types, we don't understand it. */ if (note->type < NT_NETBSDCORE_FIRSTMACH) return TRUE; switch (bfd_get_arch (abfd)) { /* On the Alpha, SPARC (32-bit and 64-bit), PT_GETREGS == mach+0 and PT_GETFPREGS == mach+2. */ case bfd_arch_alpha: case bfd_arch_sparc: switch (note->type) { case NT_NETBSDCORE_FIRSTMACH+0: return elfcore_make_note_pseudosection (abfd, ".reg", note); case NT_NETBSDCORE_FIRSTMACH+2: return elfcore_make_note_pseudosection (abfd, ".reg2", note); default: return TRUE; } /* On all other arch's, PT_GETREGS == mach+1 and PT_GETFPREGS == mach+3. */ default: switch (note->type) { case NT_NETBSDCORE_FIRSTMACH+1: return elfcore_make_note_pseudosection (abfd, ".reg", note); case NT_NETBSDCORE_FIRSTMACH+3: return elfcore_make_note_pseudosection (abfd, ".reg2", note); default: return TRUE; } } /* NOTREACHED */ } static bfd_boolean elfcore_grok_nto_status (bfd *abfd, Elf_Internal_Note *note, long *tid) { void *ddata = note->descdata; char buf[100]; char *name; asection *sect; short sig; unsigned flags; /* nto_procfs_status 'pid' field is at offset 0. */ elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, (bfd_byte *) ddata); /* nto_procfs_status 'tid' field is at offset 4. Pass it back. */ *tid = bfd_get_32 (abfd, (bfd_byte *) ddata + 4); /* nto_procfs_status 'flags' field is at offset 8. */ flags = bfd_get_32 (abfd, (bfd_byte *) ddata + 8); /* nto_procfs_status 'what' field is at offset 14. */ if ((sig = bfd_get_16 (abfd, (bfd_byte *) ddata + 14)) > 0) { elf_tdata (abfd)->core_signal = sig; elf_tdata (abfd)->core_lwpid = *tid; } /* _DEBUG_FLAG_CURTID (current thread) is 0x80. Some cores do not come from signals so we make sure we set the current thread just in case. */ if (flags & 0x00000080) elf_tdata (abfd)->core_lwpid = *tid; /* Make a ".qnx_core_status/%d" section. */ sprintf (buf, ".qnx_core_status/%ld", *tid); name = bfd_alloc (abfd, strlen (buf) + 1); if (name == NULL) return FALSE; strcpy (name, buf); sect = bfd_make_section_anyway_with_flags (abfd, name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; sect->size = note->descsz; sect->filepos = note->descpos; sect->alignment_power = 2; return (elfcore_maybe_make_sect (abfd, ".qnx_core_status", sect)); } static bfd_boolean elfcore_grok_nto_regs (bfd *abfd, Elf_Internal_Note *note, long tid, char *base) { char buf[100]; char *name; asection *sect; /* Make a "(base)/%d" section. */ sprintf (buf, "%s/%ld", base, tid); name = bfd_alloc (abfd, strlen (buf) + 1); if (name == NULL) return FALSE; strcpy (name, buf); sect = bfd_make_section_anyway_with_flags (abfd, name, SEC_HAS_CONTENTS); if (sect == NULL) return FALSE; sect->size = note->descsz; sect->filepos = note->descpos; sect->alignment_power = 2; /* This is the current thread. 
*/ if (elf_tdata (abfd)->core_lwpid == tid) return elfcore_maybe_make_sect (abfd, base, sect); return TRUE; } #define BFD_QNT_CORE_INFO 7 #define BFD_QNT_CORE_STATUS 8 #define BFD_QNT_CORE_GREG 9 #define BFD_QNT_CORE_FPREG 10 static bfd_boolean elfcore_grok_nto_note (bfd *abfd, Elf_Internal_Note *note) { /* Every GREG section has a STATUS section before it. Store the tid from the previous call to pass down to the next gregs function. */ static long tid = 1; switch (note->type) { case BFD_QNT_CORE_INFO: return elfcore_make_note_pseudosection (abfd, ".qnx_core_info", note); case BFD_QNT_CORE_STATUS: return elfcore_grok_nto_status (abfd, note, &tid); case BFD_QNT_CORE_GREG: return elfcore_grok_nto_regs (abfd, note, tid, ".reg"); case BFD_QNT_CORE_FPREG: return elfcore_grok_nto_regs (abfd, note, tid, ".reg2"); default: return TRUE; } } /* Function: elfcore_write_note Inputs: buffer to hold note, and current size of buffer name of note type of note data for note size of data for note Writes note to end of buffer. ELF64 notes are written exactly as for ELF32, despite the current (as of 2006) ELF gabi specifying that they ought to have 8-byte namesz and descsz field, and have 8-byte alignment. Other writers, eg. Linux kernel, do the same. Return: Pointer to realloc'd buffer, *BUFSIZ updated. */ char * elfcore_write_note (bfd *abfd, char *buf, int *bufsiz, const char *name, int type, const void *input, int size) { Elf_External_Note *xnp; size_t namesz; size_t newspace; char *dest; namesz = 0; if (name != NULL) namesz = strlen (name) + 1; newspace = 12 + ((namesz + 3) & -4) + ((size + 3) & -4); buf = realloc (buf, *bufsiz + newspace); dest = buf + *bufsiz; *bufsiz += newspace; xnp = (Elf_External_Note *) dest; H_PUT_32 (abfd, namesz, xnp->namesz); H_PUT_32 (abfd, size, xnp->descsz); H_PUT_32 (abfd, type, xnp->type); dest = xnp->name; if (name != NULL) { memcpy (dest, name, namesz); dest += namesz; while (namesz & 3) { *dest++ = '\0'; ++namesz; } } memcpy (dest, input, size); dest += size; while (size & 3) { *dest++ = '\0'; ++size; } return buf; } #if defined (HAVE_PRPSINFO_T) || defined (HAVE_PSINFO_T) char * elfcore_write_prpsinfo (bfd *abfd, char *buf, int *bufsiz, const char *fname, const char *psargs) { const char *note_name = "CORE"; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (bed->elf_backend_write_core_note != NULL) { char *ret; ret = (*bed->elf_backend_write_core_note) (abfd, buf, bufsiz, NT_PRPSINFO, fname, psargs); if (ret != NULL) return ret; } #if defined (HAVE_PRPSINFO32_T) || defined (HAVE_PSINFO32_T) if (bed->s->elfclass == ELFCLASS32) { #if defined (HAVE_PSINFO32_T) psinfo32_t data; int note_type = NT_PSINFO; #else prpsinfo32_t data; int note_type = NT_PRPSINFO; #endif memset (&data, 0, sizeof (data)); strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); return elfcore_write_note (abfd, buf, bufsiz, note_name, note_type, &data, sizeof (data)); } else #endif { #if defined (HAVE_PSINFO_T) psinfo_t data; int note_type = NT_PSINFO; #else prpsinfo_t data; int note_type = NT_PRPSINFO; #endif memset (&data, 0, sizeof (data)); strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); return elfcore_write_note (abfd, buf, bufsiz, note_name, note_type, &data, sizeof (data)); } } #endif /* PSINFO_T or PRPSINFO_T */ #if defined (HAVE_PRSTATUS_T) char * elfcore_write_prstatus (bfd *abfd, char *buf, int *bufsiz, long pid, int cursig, const void *gregs) { 
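  /* Sketch of typical use of the elfcore_write_* family (hypothetical
     caller, not tied to any particular client): the note buffer is
     grown incrementally, with BUF and *BUFSIZ threaded through each
     call and the possibly realloc'd buffer handed back:

       char *note_data = NULL;
       int   note_size = 0;
       note_data = elfcore_write_prpsinfo (obfd, note_data, &note_size,
					   fname, psargs);
       note_data = elfcore_write_prstatus (obfd, note_data, &note_size,
					   pid, cursig, gregs);

     The finished buffer is then emitted by the caller as the contents
     of a note section or segment.  */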
const char *note_name = "CORE"; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (bed->elf_backend_write_core_note != NULL) { char *ret; ret = (*bed->elf_backend_write_core_note) (abfd, buf, bufsiz, NT_PRSTATUS, pid, cursig, gregs); if (ret != NULL) return ret; } #if defined (HAVE_PRSTATUS32_T) if (bed->s->elfclass == ELFCLASS32) { prstatus32_t prstat; memset (&prstat, 0, sizeof (prstat)); prstat.pr_pid = pid; prstat.pr_cursig = cursig; memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); return elfcore_write_note (abfd, buf, bufsiz, note_name, NT_PRSTATUS, &prstat, sizeof (prstat)); } else #endif { prstatus_t prstat; memset (&prstat, 0, sizeof (prstat)); prstat.pr_pid = pid; prstat.pr_cursig = cursig; memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); return elfcore_write_note (abfd, buf, bufsiz, note_name, NT_PRSTATUS, &prstat, sizeof (prstat)); } } #endif /* HAVE_PRSTATUS_T */ #if defined (HAVE_LWPSTATUS_T) char * elfcore_write_lwpstatus (bfd *abfd, char *buf, int *bufsiz, long pid, int cursig, const void *gregs) { lwpstatus_t lwpstat; const char *note_name = "CORE"; memset (&lwpstat, 0, sizeof (lwpstat)); lwpstat.pr_lwpid = pid >> 16; lwpstat.pr_cursig = cursig; #if defined (HAVE_LWPSTATUS_T_PR_REG) memcpy (lwpstat.pr_reg, gregs, sizeof (lwpstat.pr_reg)); #elif defined (HAVE_LWPSTATUS_T_PR_CONTEXT) #if !defined(gregs) memcpy (lwpstat.pr_context.uc_mcontext.gregs, gregs, sizeof (lwpstat.pr_context.uc_mcontext.gregs)); #else memcpy (lwpstat.pr_context.uc_mcontext.__gregs, gregs, sizeof (lwpstat.pr_context.uc_mcontext.__gregs)); #endif #endif return elfcore_write_note (abfd, buf, bufsiz, note_name, NT_LWPSTATUS, &lwpstat, sizeof (lwpstat)); } #endif /* HAVE_LWPSTATUS_T */ #if defined (HAVE_PSTATUS_T) char * elfcore_write_pstatus (bfd *abfd, char *buf, int *bufsiz, long pid, int cursig ATTRIBUTE_UNUSED, const void *gregs ATTRIBUTE_UNUSED) { const char *note_name = "CORE"; #if defined (HAVE_PSTATUS32_T) const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (bed->s->elfclass == ELFCLASS32) { pstatus32_t pstat; memset (&pstat, 0, sizeof (pstat)); pstat.pr_pid = pid & 0xffff; buf = elfcore_write_note (abfd, buf, bufsiz, note_name, NT_PSTATUS, &pstat, sizeof (pstat)); return buf; } else #endif { pstatus_t pstat; memset (&pstat, 0, sizeof (pstat)); pstat.pr_pid = pid & 0xffff; buf = elfcore_write_note (abfd, buf, bufsiz, note_name, NT_PSTATUS, &pstat, sizeof (pstat)); return buf; } } #endif /* HAVE_PSTATUS_T */ char * elfcore_write_prfpreg (bfd *abfd, char *buf, int *bufsiz, const void *fpregs, int size) { const char *note_name = "CORE"; return elfcore_write_note (abfd, buf, bufsiz, note_name, NT_FPREGSET, fpregs, size); } char * elfcore_write_thrmisc (bfd *abfd, char *buf, int *bufsiz, const char *tname, int size) { #if defined (HAVE_THRMISC_T) char *note_name = "CORE"; return elfcore_write_note (abfd, buf, bufsiz, note_name, NT_THRMISC, tname, size); #else return buf; #endif } char * elfcore_write_prxfpreg (bfd *abfd, char *buf, int *bufsiz, const void *xfpregs, int size) { char *note_name = "LINUX"; return elfcore_write_note (abfd, buf, bufsiz, note_name, NT_PRXFPREG, xfpregs, size); } static bfd_boolean elfcore_read_notes (bfd *abfd, file_ptr offset, bfd_size_type size) { char *buf; char *p; if (size <= 0) return TRUE; if (bfd_seek (abfd, offset, SEEK_SET) != 0) return FALSE; buf = bfd_malloc (size); if (buf == NULL) return FALSE; if (bfd_bread (buf, size, abfd) != size) { error: free (buf); return FALSE; } p = buf; while (p < buf + size) { /* 
FIXME: bad alignment assumption. */ Elf_External_Note *xnp = (Elf_External_Note *) p; Elf_Internal_Note in; in.type = H_GET_32 (abfd, xnp->type); in.namesz = H_GET_32 (abfd, xnp->namesz); in.namedata = xnp->name; in.descsz = H_GET_32 (abfd, xnp->descsz); in.descdata = in.namedata + BFD_ALIGN (in.namesz, 4); in.descpos = offset + (in.descdata - buf); if (CONST_STRNEQ (in.namedata, "NetBSD-CORE")) { if (! elfcore_grok_netbsd_note (abfd, &in)) goto error; } else if (CONST_STRNEQ (in.namedata, "QNX")) { if (! elfcore_grok_nto_note (abfd, &in)) goto error; } else { if (! elfcore_grok_note (abfd, &in)) goto error; } p = in.descdata + BFD_ALIGN (in.descsz, 4); } free (buf); return TRUE; } /* Providing external access to the ELF program header table. */ /* Return an upper bound on the number of bytes required to store a copy of ABFD's program header table entries. Return -1 if an error occurs; bfd_get_error will return an appropriate code. */ long bfd_get_elf_phdr_upper_bound (bfd *abfd) { if (abfd->xvec->flavour != bfd_target_elf_flavour) { bfd_set_error (bfd_error_wrong_format); return -1; } return elf_elfheader (abfd)->e_phnum * sizeof (Elf_Internal_Phdr); } /* Copy ABFD's program header table entries to *PHDRS. The entries will be stored as an array of Elf_Internal_Phdr structures, as defined in include/elf/internal.h. To find out how large the buffer needs to be, call bfd_get_elf_phdr_upper_bound. Return the number of program header table entries read, or -1 if an error occurs; bfd_get_error will return an appropriate code. */ int bfd_get_elf_phdrs (bfd *abfd, void *phdrs) { int num_phdrs; if (abfd->xvec->flavour != bfd_target_elf_flavour) { bfd_set_error (bfd_error_wrong_format); return -1; } num_phdrs = elf_elfheader (abfd)->e_phnum; memcpy (phdrs, elf_tdata (abfd)->phdr, num_phdrs * sizeof (Elf_Internal_Phdr)); return num_phdrs; } void _bfd_elf_sprintf_vma (bfd *abfd ATTRIBUTE_UNUSED, char *buf, bfd_vma value) { #ifdef BFD64 Elf_Internal_Ehdr *i_ehdrp; /* Elf file header, internal form */ i_ehdrp = elf_elfheader (abfd); if (i_ehdrp == NULL) sprintf_vma (buf, value); else { if (i_ehdrp->e_ident[EI_CLASS] == ELFCLASS64) { #if BFD_HOST_64BIT_LONG sprintf (buf, "%016lx", value); #else sprintf (buf, "%08lx%08lx", _bfd_int64_high (value), _bfd_int64_low (value)); #endif } else sprintf (buf, "%08lx", (unsigned long) (value & 0xffffffff)); } #else sprintf_vma (buf, value); #endif } void _bfd_elf_fprintf_vma (bfd *abfd ATTRIBUTE_UNUSED, void *stream, bfd_vma value) { #ifdef BFD64 Elf_Internal_Ehdr *i_ehdrp; /* Elf file header, internal form */ i_ehdrp = elf_elfheader (abfd); if (i_ehdrp == NULL) fprintf_vma ((FILE *) stream, value); else { if (i_ehdrp->e_ident[EI_CLASS] == ELFCLASS64) { #if BFD_HOST_64BIT_LONG fprintf ((FILE *) stream, "%016lx", value); #else fprintf ((FILE *) stream, "%08lx%08lx", _bfd_int64_high (value), _bfd_int64_low (value)); #endif } else fprintf ((FILE *) stream, "%08lx", (unsigned long) (value & 0xffffffff)); } #else fprintf_vma ((FILE *) stream, value); #endif } enum elf_reloc_type_class _bfd_elf_reloc_type_class (const Elf_Internal_Rela *rela ATTRIBUTE_UNUSED) { return reloc_class_normal; } /* For RELA architectures, return the relocation value for a relocation against a local symbol. 
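   In the common (non-merged) case the value computed below is simply

     sec->output_section->vma + sec->output_offset + sym->st_value

   For an STT_SECTION symbol in a SEC_MERGE section the addend is first
   remapped through _bfd_merged_section_offset, since string/constant
   merging may have moved the addressed object to a different output
   offset, or even into a different output section.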
*/ bfd_vma _bfd_elf_rela_local_sym (bfd *abfd, Elf_Internal_Sym *sym, asection **psec, Elf_Internal_Rela *rel) { asection *sec = *psec; bfd_vma relocation; relocation = (sec->output_section->vma + sec->output_offset + sym->st_value); if ((sec->flags & SEC_MERGE) && ELF_ST_TYPE (sym->st_info) == STT_SECTION && sec->sec_info_type == ELF_INFO_TYPE_MERGE) { rel->r_addend = _bfd_merged_section_offset (abfd, psec, elf_section_data (sec)->sec_info, sym->st_value + rel->r_addend); if (sec != *psec) { /* If we have changed the section, and our original section is marked with SEC_EXCLUDE, it means that the original SEC_MERGE section has been completely subsumed in some other SEC_MERGE section. In this case, we need to leave some info around for --emit-relocs. */ if ((sec->flags & SEC_EXCLUDE) != 0) sec->kept_section = *psec; sec = *psec; } rel->r_addend -= relocation; rel->r_addend += sec->output_section->vma + sec->output_offset; } return relocation; } bfd_vma _bfd_elf_rel_local_sym (bfd *abfd, Elf_Internal_Sym *sym, asection **psec, bfd_vma addend) { asection *sec = *psec; if (sec->sec_info_type != ELF_INFO_TYPE_MERGE) return sym->st_value + addend; return _bfd_merged_section_offset (abfd, psec, elf_section_data (sec)->sec_info, sym->st_value + addend); } bfd_vma _bfd_elf_section_offset (bfd *abfd, struct bfd_link_info *info, asection *sec, bfd_vma offset) { switch (sec->sec_info_type) { case ELF_INFO_TYPE_STABS: return _bfd_stab_section_offset (sec, elf_section_data (sec)->sec_info, offset); case ELF_INFO_TYPE_EH_FRAME: return _bfd_elf_eh_frame_section_offset (abfd, info, sec, offset); default: return offset; } } /* Create a new BFD as if by bfd_openr. Rather than opening a file, reconstruct an ELF file by reading the segments out of remote memory based on the ELF file header at EHDR_VMA and the ELF program headers it points to. If not null, *LOADBASEP is filled in with the difference between the VMAs from which the segments were read, and the VMAs the file headers (and hence BFD's idea of each section's VMA) put them at. The function TARGET_READ_MEMORY is called to copy LEN bytes from the remote memory at target address VMA into the local buffer at MYADDR; it should return zero on success or an `errno' code on failure. TEMPL must be a BFD for an ELF target with the word size and byte order found in the remote memory. */ bfd * bfd_elf_bfd_from_remote_memory (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep, int (*target_read_memory) (bfd_vma, bfd_byte *, int)) { return (*get_elf_backend_data (templ)->elf_backend_bfd_from_remote_memory) (templ, ehdr_vma, loadbasep, target_read_memory); } long _bfd_elf_get_synthetic_symtab (bfd *abfd, long symcount ATTRIBUTE_UNUSED, asymbol **syms ATTRIBUTE_UNUSED, long dynsymcount, asymbol **dynsyms, asymbol **ret) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); asection *relplt; asymbol *s; const char *relplt_name; bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean); arelent *p; long count, i, n; size_t size; Elf_Internal_Shdr *hdr; char *names; asection *plt; *ret = NULL; if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) return 0; if (dynsymcount <= 0) return 0; if (!bed->plt_sym_val) return 0; relplt_name = bed->relplt_name; if (relplt_name == NULL) relplt_name = bed->default_use_rela_p ? 
".rela.plt" : ".rel.plt"; relplt = bfd_get_section_by_name (abfd, relplt_name); if (relplt == NULL) return 0; hdr = &elf_section_data (relplt)->this_hdr; if (hdr->sh_link != elf_dynsymtab (abfd) || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA)) return 0; plt = bfd_get_section_by_name (abfd, ".plt"); if (plt == NULL) return 0; slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table; if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE)) return -1; count = relplt->size / hdr->sh_entsize; size = count * sizeof (asymbol); p = relplt->relocation; for (i = 0; i < count; i++, s++, p++) size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt"); s = *ret = bfd_malloc (size); if (s == NULL) return -1; names = (char *) (s + count); p = relplt->relocation; n = 0; for (i = 0; i < count; i++, s++, p++) { size_t len; bfd_vma addr; addr = bed->plt_sym_val (i, plt, p); if (addr == (bfd_vma) -1) continue; *s = **p->sym_ptr_ptr; /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since we are defining a symbol, ensure one of them is set. */ if ((s->flags & BSF_LOCAL) == 0) s->flags |= BSF_GLOBAL; s->section = plt; s->value = addr - plt->vma; s->name = names; len = strlen ((*p->sym_ptr_ptr)->name); memcpy (names, (*p->sym_ptr_ptr)->name, len); names += len; memcpy (names, "@plt", sizeof ("@plt")); names += sizeof ("@plt"); ++n; } return n; } struct elf_symbuf_symbol { unsigned long st_name; /* Symbol name, index in string tbl */ unsigned char st_info; /* Type and binding attributes */ unsigned char st_other; /* Visibilty, and target specific */ }; struct elf_symbuf_head { struct elf_symbuf_symbol *ssym; bfd_size_type count; unsigned int st_shndx; }; struct elf_symbol { union { Elf_Internal_Sym *isym; struct elf_symbuf_symbol *ssym; } u; const char *name; }; /* Sort references to symbols by ascending section number. 
*/ static int elf_sort_elf_symbol (const void *arg1, const void *arg2) { const Elf_Internal_Sym *s1 = *(const Elf_Internal_Sym **) arg1; const Elf_Internal_Sym *s2 = *(const Elf_Internal_Sym **) arg2; return s1->st_shndx - s2->st_shndx; } static int elf_sym_name_compare (const void *arg1, const void *arg2) { const struct elf_symbol *s1 = (const struct elf_symbol *) arg1; const struct elf_symbol *s2 = (const struct elf_symbol *) arg2; return strcmp (s1->name, s2->name); } static struct elf_symbuf_head * elf_create_symbuf (bfd_size_type symcount, Elf_Internal_Sym *isymbuf) { Elf_Internal_Sym **ind, **indbufend, **indbuf = bfd_malloc2 (symcount, sizeof (*indbuf)); struct elf_symbuf_symbol *ssym; struct elf_symbuf_head *ssymbuf, *ssymhead; bfd_size_type i, shndx_count; if (indbuf == NULL) return NULL; for (ind = indbuf, i = 0; i < symcount; i++) if (isymbuf[i].st_shndx != SHN_UNDEF) *ind++ = &isymbuf[i]; indbufend = ind; qsort (indbuf, indbufend - indbuf, sizeof (Elf_Internal_Sym *), elf_sort_elf_symbol); shndx_count = 0; if (indbufend > indbuf) for (ind = indbuf, shndx_count++; ind < indbufend - 1; ind++) if (ind[0]->st_shndx != ind[1]->st_shndx) shndx_count++; ssymbuf = bfd_malloc ((shndx_count + 1) * sizeof (*ssymbuf) - + (indbufend - indbuf) * sizeof (*ssymbuf)); + + (indbufend - indbuf) * sizeof (*ssym)); if (ssymbuf == NULL) { free (indbuf); return NULL; } - ssym = (struct elf_symbuf_symbol *) (ssymbuf + shndx_count); + ssym = (struct elf_symbuf_symbol *) (ssymbuf + shndx_count + 1); ssymbuf->ssym = NULL; ssymbuf->count = shndx_count; ssymbuf->st_shndx = 0; for (ssymhead = ssymbuf, ind = indbuf; ind < indbufend; ssym++, ind++) { if (ind == indbuf || ssymhead->st_shndx != (*ind)->st_shndx) { ssymhead++; ssymhead->ssym = ssym; ssymhead->count = 0; ssymhead->st_shndx = (*ind)->st_shndx; } ssym->st_name = (*ind)->st_name; ssym->st_info = (*ind)->st_info; ssym->st_other = (*ind)->st_other; ssymhead->count++; } BFD_ASSERT ((bfd_size_type) (ssymhead - ssymbuf) == shndx_count); free (indbuf); return ssymbuf; } /* Check if 2 sections define the same set of local and global symbols. */ bfd_boolean bfd_elf_match_symbols_in_sections (asection *sec1, asection *sec2, struct bfd_link_info *info) { bfd *bfd1, *bfd2; const struct elf_backend_data *bed1, *bed2; Elf_Internal_Shdr *hdr1, *hdr2; bfd_size_type symcount1, symcount2; Elf_Internal_Sym *isymbuf1, *isymbuf2; struct elf_symbuf_head *ssymbuf1, *ssymbuf2; Elf_Internal_Sym *isym, *isymend; struct elf_symbol *symtable1 = NULL, *symtable2 = NULL; bfd_size_type count1, count2, i; int shndx1, shndx2; bfd_boolean result; bfd1 = sec1->owner; bfd2 = sec2->owner; /* If both are .gnu.linkonce sections, they have to have the same section name. */ if (CONST_STRNEQ (sec1->name, ".gnu.linkonce") && CONST_STRNEQ (sec2->name, ".gnu.linkonce")) return strcmp (sec1->name + sizeof ".gnu.linkonce", sec2->name + sizeof ".gnu.linkonce") == 0; /* Both sections have to be in ELF. */ if (bfd_get_flavour (bfd1) != bfd_target_elf_flavour || bfd_get_flavour (bfd2) != bfd_target_elf_flavour) return FALSE; if (elf_section_type (sec1) != elf_section_type (sec2)) return FALSE; if ((elf_section_flags (sec1) & SHF_GROUP) != 0 && (elf_section_flags (sec2) & SHF_GROUP) != 0) { /* If both are members of section groups, they have to have the same group name. 
*/ if (strcmp (elf_group_name (sec1), elf_group_name (sec2)) != 0) return FALSE; } shndx1 = _bfd_elf_section_from_bfd_section (bfd1, sec1); shndx2 = _bfd_elf_section_from_bfd_section (bfd2, sec2); if (shndx1 == -1 || shndx2 == -1) return FALSE; bed1 = get_elf_backend_data (bfd1); bed2 = get_elf_backend_data (bfd2); hdr1 = &elf_tdata (bfd1)->symtab_hdr; symcount1 = hdr1->sh_size / bed1->s->sizeof_sym; hdr2 = &elf_tdata (bfd2)->symtab_hdr; symcount2 = hdr2->sh_size / bed2->s->sizeof_sym; if (symcount1 == 0 || symcount2 == 0) return FALSE; result = FALSE; isymbuf1 = NULL; isymbuf2 = NULL; ssymbuf1 = elf_tdata (bfd1)->symbuf; ssymbuf2 = elf_tdata (bfd2)->symbuf; if (ssymbuf1 == NULL) { isymbuf1 = bfd_elf_get_elf_syms (bfd1, hdr1, symcount1, 0, NULL, NULL, NULL); if (isymbuf1 == NULL) goto done; if (!info->reduce_memory_overheads) elf_tdata (bfd1)->symbuf = ssymbuf1 = elf_create_symbuf (symcount1, isymbuf1); } if (ssymbuf1 == NULL || ssymbuf2 == NULL) { isymbuf2 = bfd_elf_get_elf_syms (bfd2, hdr2, symcount2, 0, NULL, NULL, NULL); if (isymbuf2 == NULL) goto done; if (ssymbuf1 != NULL && !info->reduce_memory_overheads) elf_tdata (bfd2)->symbuf = ssymbuf2 = elf_create_symbuf (symcount2, isymbuf2); } if (ssymbuf1 != NULL && ssymbuf2 != NULL) { /* Optimized faster version. */ bfd_size_type lo, hi, mid; struct elf_symbol *symp; struct elf_symbuf_symbol *ssym, *ssymend; lo = 0; hi = ssymbuf1->count; ssymbuf1++; count1 = 0; while (lo < hi) { mid = (lo + hi) / 2; if ((unsigned int) shndx1 < ssymbuf1[mid].st_shndx) hi = mid; else if ((unsigned int) shndx1 > ssymbuf1[mid].st_shndx) lo = mid + 1; else { count1 = ssymbuf1[mid].count; ssymbuf1 += mid; break; } } lo = 0; hi = ssymbuf2->count; ssymbuf2++; count2 = 0; while (lo < hi) { mid = (lo + hi) / 2; if ((unsigned int) shndx2 < ssymbuf2[mid].st_shndx) hi = mid; else if ((unsigned int) shndx2 > ssymbuf2[mid].st_shndx) lo = mid + 1; else { count2 = ssymbuf2[mid].count; ssymbuf2 += mid; break; } } if (count1 == 0 || count2 == 0 || count1 != count2) goto done; symtable1 = bfd_malloc (count1 * sizeof (struct elf_symbol)); symtable2 = bfd_malloc (count2 * sizeof (struct elf_symbol)); if (symtable1 == NULL || symtable2 == NULL) goto done; symp = symtable1; for (ssym = ssymbuf1->ssym, ssymend = ssym + count1; ssym < ssymend; ssym++, symp++) { symp->u.ssym = ssym; symp->name = bfd_elf_string_from_elf_section (bfd1, hdr1->sh_link, ssym->st_name); } symp = symtable2; for (ssym = ssymbuf2->ssym, ssymend = ssym + count2; ssym < ssymend; ssym++, symp++) { symp->u.ssym = ssym; symp->name = bfd_elf_string_from_elf_section (bfd2, hdr2->sh_link, ssym->st_name); } /* Sort symbol by name. */ qsort (symtable1, count1, sizeof (struct elf_symbol), elf_sym_name_compare); qsort (symtable2, count1, sizeof (struct elf_symbol), elf_sym_name_compare); for (i = 0; i < count1; i++) /* Two symbols must have the same binding, type and name. */ if (symtable1 [i].u.ssym->st_info != symtable2 [i].u.ssym->st_info || symtable1 [i].u.ssym->st_other != symtable2 [i].u.ssym->st_other || strcmp (symtable1 [i].name, symtable2 [i].name) != 0) goto done; result = TRUE; goto done; } symtable1 = bfd_malloc (symcount1 * sizeof (struct elf_symbol)); symtable2 = bfd_malloc (symcount2 * sizeof (struct elf_symbol)); if (symtable1 == NULL || symtable2 == NULL) goto done; /* Count definitions in the section. 
*/ count1 = 0; for (isym = isymbuf1, isymend = isym + symcount1; isym < isymend; isym++) if (isym->st_shndx == (unsigned int) shndx1) symtable1[count1++].u.isym = isym; count2 = 0; for (isym = isymbuf2, isymend = isym + symcount2; isym < isymend; isym++) if (isym->st_shndx == (unsigned int) shndx2) symtable2[count2++].u.isym = isym; if (count1 == 0 || count2 == 0 || count1 != count2) goto done; for (i = 0; i < count1; i++) symtable1[i].name = bfd_elf_string_from_elf_section (bfd1, hdr1->sh_link, symtable1[i].u.isym->st_name); for (i = 0; i < count2; i++) symtable2[i].name = bfd_elf_string_from_elf_section (bfd2, hdr2->sh_link, symtable2[i].u.isym->st_name); /* Sort symbol by name. */ qsort (symtable1, count1, sizeof (struct elf_symbol), elf_sym_name_compare); qsort (symtable2, count1, sizeof (struct elf_symbol), elf_sym_name_compare); for (i = 0; i < count1; i++) /* Two symbols must have the same binding, type and name. */ if (symtable1 [i].u.isym->st_info != symtable2 [i].u.isym->st_info || symtable1 [i].u.isym->st_other != symtable2 [i].u.isym->st_other || strcmp (symtable1 [i].name, symtable2 [i].name) != 0) goto done; result = TRUE; done: if (symtable1) free (symtable1); if (symtable2) free (symtable2); if (isymbuf1) free (isymbuf1); if (isymbuf2) free (isymbuf2); return result; } /* It is only used by x86-64 so far. */ asection _bfd_elf_large_com_section = BFD_FAKE_SECTION (_bfd_elf_large_com_section, SEC_IS_COMMON, NULL, "LARGE_COMMON", 0); /* Return TRUE if 2 section types are compatible. */ bfd_boolean _bfd_elf_match_sections_by_type (bfd *abfd, const asection *asec, bfd *bbfd, const asection *bsec) { if (asec == NULL || bsec == NULL || abfd->xvec->flavour != bfd_target_elf_flavour || bbfd->xvec->flavour != bfd_target_elf_flavour) return TRUE; return elf_section_type (asec) == elf_section_type (bsec); } void _bfd_elf_set_osabi (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED) { Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */ i_ehdrp = elf_elfheader (abfd); i_ehdrp->e_ident[EI_OSABI] = get_elf_backend_data (abfd)->elf_osabi; } /* Return TRUE for ELF symbol types that represent functions. This is the default version of this function, which is sufficient for most targets. It returns true if TYPE is STT_FUNC. */ bfd_boolean _bfd_elf_is_function_type (unsigned int type) { return (type == STT_FUNC); } Index: projects/release-pkg/contrib/binutils =================================================================== --- projects/release-pkg/contrib/binutils (revision 295925) +++ projects/release-pkg/contrib/binutils (revision 295926) Property changes on: projects/release-pkg/contrib/binutils ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/binutils:r295886-295925 Index: projects/release-pkg/contrib/libarchive/libarchive/archive_read.c =================================================================== --- projects/release-pkg/contrib/libarchive/libarchive/archive_read.c (revision 295925) +++ projects/release-pkg/contrib/libarchive/libarchive/archive_read.c (revision 295926) @@ -1,1643 +1,1646 @@ /*- * Copyright (c) 2003-2011 Tim Kientzle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This file contains the "essential" portions of the read API, that * is, stuff that will probably always be used by any client that * actually needs to read an archive. Optional pieces have been, as * far as possible, separated out into separate files to avoid * needlessly bloating statically-linked clients. */ #include "archive_platform.h" __FBSDID("$FreeBSD$"); #ifdef HAVE_ERRNO_H #include #endif #include #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #include "archive.h" #include "archive_entry.h" #include "archive_private.h" #include "archive_read_private.h" #define minimum(a, b) (a < b ? a : b) static int choose_filters(struct archive_read *); static int choose_format(struct archive_read *); static struct archive_vtable *archive_read_vtable(void); static int64_t _archive_filter_bytes(struct archive *, int); static int _archive_filter_code(struct archive *, int); static const char *_archive_filter_name(struct archive *, int); static int _archive_filter_count(struct archive *); static int _archive_read_close(struct archive *); static int _archive_read_data_block(struct archive *, const void **, size_t *, int64_t *); static int _archive_read_free(struct archive *); static int _archive_read_next_header(struct archive *, struct archive_entry **); static int _archive_read_next_header2(struct archive *, struct archive_entry *); static int64_t advance_file_pointer(struct archive_read_filter *, int64_t); static struct archive_vtable * archive_read_vtable(void) { static struct archive_vtable av; static int inited = 0; if (!inited) { av.archive_filter_bytes = _archive_filter_bytes; av.archive_filter_code = _archive_filter_code; av.archive_filter_name = _archive_filter_name; av.archive_filter_count = _archive_filter_count; av.archive_read_data_block = _archive_read_data_block; av.archive_read_next_header = _archive_read_next_header; av.archive_read_next_header2 = _archive_read_next_header2; av.archive_free = _archive_read_free; av.archive_close = _archive_read_close; inited = 1; } return (&av); } /* * Allocate, initialize and return a struct archive object. */ struct archive * archive_read_new(void) { struct archive_read *a; a = (struct archive_read *)malloc(sizeof(*a)); if (a == NULL) return (NULL); memset(a, 0, sizeof(*a)); a->archive.magic = ARCHIVE_READ_MAGIC; a->archive.state = ARCHIVE_STATE_NEW; a->entry = archive_entry_new2(&a->archive); a->archive.vtable = archive_read_vtable(); return (&a->archive); } /* * Record the do-not-extract-to file. This belongs in archive_read_extract.c. 
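 *
 * A typical caller obtains the device/inode pair from a stat of the
 * archive file itself, for example (sketch only, hypothetical fd,
 * error handling omitted):
 *
 *     struct stat st;
 *     if (fstat(archive_fd, &st) == 0)
 *         archive_read_extract_set_skip_file(a, st.st_dev, st.st_ino);
 *
 * so that extraction will not write over the archive being read.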
*/ void archive_read_extract_set_skip_file(struct archive *_a, int64_t d, int64_t i) { struct archive_read *a = (struct archive_read *)_a; if (ARCHIVE_OK != __archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_ANY, "archive_read_extract_set_skip_file")) return; a->skip_file_set = 1; a->skip_file_dev = d; a->skip_file_ino = i; } /* * Open the archive */ int archive_read_open(struct archive *a, void *client_data, archive_open_callback *client_opener, archive_read_callback *client_reader, archive_close_callback *client_closer) { /* Old archive_read_open() is just a thin shell around * archive_read_open1. */ archive_read_set_open_callback(a, client_opener); archive_read_set_read_callback(a, client_reader); archive_read_set_close_callback(a, client_closer); archive_read_set_callback_data(a, client_data); return archive_read_open1(a); } int archive_read_open2(struct archive *a, void *client_data, archive_open_callback *client_opener, archive_read_callback *client_reader, archive_skip_callback *client_skipper, archive_close_callback *client_closer) { /* Old archive_read_open2() is just a thin shell around * archive_read_open1. */ archive_read_set_callback_data(a, client_data); archive_read_set_open_callback(a, client_opener); archive_read_set_read_callback(a, client_reader); archive_read_set_skip_callback(a, client_skipper); archive_read_set_close_callback(a, client_closer); return archive_read_open1(a); } static ssize_t client_read_proxy(struct archive_read_filter *self, const void **buff) { ssize_t r; r = (self->archive->client.reader)(&self->archive->archive, self->data, buff); return (r); } static int64_t client_skip_proxy(struct archive_read_filter *self, int64_t request) { if (request < 0) __archive_errx(1, "Negative skip requested."); if (request == 0) return 0; if (self->archive->client.skipper != NULL) { /* Seek requests over 1GiB are broken down into * multiple seeks. This avoids overflows when the * requests get passed through 32-bit arguments. */ int64_t skip_limit = (int64_t)1 << 30; int64_t total = 0; for (;;) { int64_t get, ask = request; if (ask > skip_limit) ask = skip_limit; get = (self->archive->client.skipper) (&self->archive->archive, self->data, ask); if (get == 0) return (total); request -= get; total += get; } } else if (self->archive->client.seeker != NULL && request > 64 * 1024) { /* If the client provided a seeker but not a skipper, * we can use the seeker to skip forward. * * Note: This isn't always a good idea. The client * skipper is allowed to skip by less than requested * if it needs to maintain block alignment. The * seeker is not allowed to play such games, so using * the seeker here may be a performance loss compared * to just reading and discarding. That's why we * only do this for skips of over 64k. */ int64_t before = self->position; int64_t after = (self->archive->client.seeker) (&self->archive->archive, self->data, request, SEEK_CUR); if (after != before + request) return ARCHIVE_FATAL; return after - before; } return 0; } static int64_t client_seek_proxy(struct archive_read_filter *self, int64_t offset, int whence) { /* DO NOT use the skipper here! If we transparently handled * forward seek here by using the skipper, that will break * other libarchive code that assumes a successful forward * seek means it can also seek backwards. 
*/ if (self->archive->client.seeker == NULL) return (ARCHIVE_FAILED); return (self->archive->client.seeker)(&self->archive->archive, self->data, offset, whence); } static int client_close_proxy(struct archive_read_filter *self) { int r = ARCHIVE_OK, r2; unsigned int i; if (self->archive->client.closer == NULL) return (r); for (i = 0; i < self->archive->client.nodes; i++) { r2 = (self->archive->client.closer) ((struct archive *)self->archive, self->archive->client.dataset[i].data); if (r > r2) r = r2; } return (r); } static int client_open_proxy(struct archive_read_filter *self) { int r = ARCHIVE_OK; if (self->archive->client.opener != NULL) r = (self->archive->client.opener)( (struct archive *)self->archive, self->data); return (r); } static int client_switch_proxy(struct archive_read_filter *self, unsigned int iindex) { int r1 = ARCHIVE_OK, r2 = ARCHIVE_OK; void *data2 = NULL; /* Don't do anything if already in the specified data node */ if (self->archive->client.cursor == iindex) return (ARCHIVE_OK); self->archive->client.cursor = iindex; data2 = self->archive->client.dataset[self->archive->client.cursor].data; if (self->archive->client.switcher != NULL) { r1 = r2 = (self->archive->client.switcher) ((struct archive *)self->archive, self->data, data2); self->data = data2; } else { /* Attempt to call close and open instead */ if (self->archive->client.closer != NULL) r1 = (self->archive->client.closer) ((struct archive *)self->archive, self->data); self->data = data2; if (self->archive->client.opener != NULL) r2 = (self->archive->client.opener) ((struct archive *)self->archive, self->data); } return (r1 < r2) ? r1 : r2; } int archive_read_set_open_callback(struct archive *_a, archive_open_callback *client_opener) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_open_callback"); a->client.opener = client_opener; return ARCHIVE_OK; } int archive_read_set_read_callback(struct archive *_a, archive_read_callback *client_reader) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_read_callback"); a->client.reader = client_reader; return ARCHIVE_OK; } int archive_read_set_skip_callback(struct archive *_a, archive_skip_callback *client_skipper) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_skip_callback"); a->client.skipper = client_skipper; return ARCHIVE_OK; } int archive_read_set_seek_callback(struct archive *_a, archive_seek_callback *client_seeker) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_seek_callback"); a->client.seeker = client_seeker; return ARCHIVE_OK; } int archive_read_set_close_callback(struct archive *_a, archive_close_callback *client_closer) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_close_callback"); a->client.closer = client_closer; return ARCHIVE_OK; } int archive_read_set_switch_callback(struct archive *_a, archive_switch_callback *client_switcher) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_switch_callback"); a->client.switcher = client_switcher; return ARCHIVE_OK; } int archive_read_set_callback_data(struct archive *_a, void *client_data) { return 
archive_read_set_callback_data2(_a, client_data, 0); } int archive_read_set_callback_data2(struct archive *_a, void *client_data, unsigned int iindex) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_set_callback_data2"); if (a->client.nodes == 0) { a->client.dataset = (struct archive_read_data_node *) calloc(1, sizeof(*a->client.dataset)); if (a->client.dataset == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory."); return ARCHIVE_FATAL; } a->client.nodes = 1; } if (iindex > a->client.nodes - 1) { archive_set_error(&a->archive, EINVAL, "Invalid index specified."); return ARCHIVE_FATAL; } a->client.dataset[iindex].data = client_data; a->client.dataset[iindex].begin_position = -1; a->client.dataset[iindex].total_size = -1; return ARCHIVE_OK; } int archive_read_add_callback_data(struct archive *_a, void *client_data, unsigned int iindex) { struct archive_read *a = (struct archive_read *)_a; void *p; unsigned int i; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_add_callback_data"); if (iindex > a->client.nodes) { archive_set_error(&a->archive, EINVAL, "Invalid index specified."); return ARCHIVE_FATAL; } p = realloc(a->client.dataset, sizeof(*a->client.dataset) * (++(a->client.nodes))); if (p == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory."); return ARCHIVE_FATAL; } a->client.dataset = (struct archive_read_data_node *)p; for (i = a->client.nodes - 1; i > iindex && i > 0; i--) { a->client.dataset[i].data = a->client.dataset[i-1].data; a->client.dataset[i].begin_position = -1; a->client.dataset[i].total_size = -1; } a->client.dataset[iindex].data = client_data; a->client.dataset[iindex].begin_position = -1; a->client.dataset[iindex].total_size = -1; return ARCHIVE_OK; } int archive_read_append_callback_data(struct archive *_a, void *client_data) { struct archive_read *a = (struct archive_read *)_a; return archive_read_add_callback_data(_a, client_data, a->client.nodes); } int archive_read_prepend_callback_data(struct archive *_a, void *client_data) { return archive_read_add_callback_data(_a, client_data, 0); } int archive_read_open1(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct archive_read_filter *filter, *tmp; int slot; int e = 0; unsigned int i; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_open"); archive_clear_error(&a->archive); if (a->client.reader == NULL) { archive_set_error(&a->archive, EINVAL, "No reader function provided to archive_read_open"); a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } /* Open data source. */ if (a->client.opener != NULL) { e = (a->client.opener)(&a->archive, a->client.dataset[0].data); if (e != 0) { /* If the open failed, call the closer to clean up. 
*/ if (a->client.closer) { for (i = 0; i < a->client.nodes; i++) (a->client.closer)(&a->archive, a->client.dataset[i].data); } return (e); } } filter = calloc(1, sizeof(*filter)); if (filter == NULL) return (ARCHIVE_FATAL); filter->bidder = NULL; filter->upstream = NULL; filter->archive = a; filter->data = a->client.dataset[0].data; filter->open = client_open_proxy; filter->read = client_read_proxy; filter->skip = client_skip_proxy; filter->seek = client_seek_proxy; filter->close = client_close_proxy; filter->sswitch = client_switch_proxy; filter->name = "none"; filter->code = ARCHIVE_FILTER_NONE; a->client.dataset[0].begin_position = 0; if (!a->filter || !a->bypass_filter_bidding) { a->filter = filter; /* Build out the input pipeline. */ e = choose_filters(a); if (e < ARCHIVE_WARN) { a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } } else { /* Need to add "NONE" type filter at the end of the filter chain */ tmp = a->filter; while (tmp->upstream) tmp = tmp->upstream; tmp->upstream = filter; } if (!a->format) { slot = choose_format(a); if (slot < 0) { __archive_read_close_filters(a); a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } a->format = &(a->formats[slot]); } a->archive.state = ARCHIVE_STATE_HEADER; /* Ensure libarchive starts from the first node in a multivolume set */ client_switch_proxy(a->filter, 0); return (e); } /* * Allow each registered stream transform to bid on whether * it wants to handle this stream. Repeat until we've finished * building the pipeline. */ static int choose_filters(struct archive_read *a) { - int number_bidders, i, bid, best_bid; + int number_bidders, i, bid, best_bid, n; struct archive_read_filter_bidder *bidder, *best_bidder; struct archive_read_filter *filter; ssize_t avail; int r; - for (;;) { + for (n = 0; n < 25; ++n) { number_bidders = sizeof(a->bidders) / sizeof(a->bidders[0]); best_bid = 0; best_bidder = NULL; bidder = a->bidders; for (i = 0; i < number_bidders; i++, bidder++) { if (bidder->bid != NULL) { bid = (bidder->bid)(bidder, a->filter); if (bid > best_bid) { best_bid = bid; best_bidder = bidder; } } } /* If no bidder, we're done. */ if (best_bidder == NULL) { /* Verify the filter by asking it for some data. */ __archive_read_filter_ahead(a->filter, 1, &avail); if (avail < 0) { __archive_read_close_filters(a); __archive_read_free_filters(a); return (ARCHIVE_FATAL); } a->archive.compression_name = a->filter->name; a->archive.compression_code = a->filter->code; return (ARCHIVE_OK); } filter = (struct archive_read_filter *)calloc(1, sizeof(*filter)); if (filter == NULL) return (ARCHIVE_FATAL); filter->bidder = best_bidder; filter->archive = a; filter->upstream = a->filter; a->filter = filter; r = (best_bidder->init)(a->filter); if (r != ARCHIVE_OK) { __archive_read_close_filters(a); __archive_read_free_filters(a); return (ARCHIVE_FATAL); } } + archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, + "Input requires too many filters for decoding"); + return (ARCHIVE_FATAL); } /* * Read header of next entry. */ static int _archive_read_next_header2(struct archive *_a, struct archive_entry *entry) { struct archive_read *a = (struct archive_read *)_a; int r1 = ARCHIVE_OK, r2; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA, "archive_read_next_header"); archive_entry_clear(entry); archive_clear_error(&a->archive); /* * If client didn't consume entire data, skip any remainder * (This is especially important for GNU incremental directories.) 
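	 * This implicit skip is what lets a client simply iterate headers,
	 * e.g. (a minimal sketch; error handling elided, 'a' is the
	 * struct archive handle):
	 *
	 *	struct archive_entry *entry;
	 *	while (archive_read_next_header(a, &entry) == ARCHIVE_OK) {
	 *		... inspect entry; read as much or as little of
	 *		    its data as needed ...
	 *	}
	 *
	 * without having to drain each entry's data by hand.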
*/ if (a->archive.state == ARCHIVE_STATE_DATA) { r1 = archive_read_data_skip(&a->archive); if (r1 == ARCHIVE_EOF) archive_set_error(&a->archive, EIO, "Premature end-of-file."); if (r1 == ARCHIVE_EOF || r1 == ARCHIVE_FATAL) { a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } } /* Record start-of-header offset in uncompressed stream. */ a->header_position = a->filter->position; ++_a->file_count; r2 = (a->format->read_header)(a, entry); /* * EOF and FATAL are persistent at this layer. By * modifying the state, we guarantee that future calls to * read a header or read data will fail. */ switch (r2) { case ARCHIVE_EOF: a->archive.state = ARCHIVE_STATE_EOF; --_a->file_count;/* Revert a file counter. */ break; case ARCHIVE_OK: a->archive.state = ARCHIVE_STATE_DATA; break; case ARCHIVE_WARN: a->archive.state = ARCHIVE_STATE_DATA; break; case ARCHIVE_RETRY: break; case ARCHIVE_FATAL: a->archive.state = ARCHIVE_STATE_FATAL; break; } a->read_data_output_offset = 0; a->read_data_remaining = 0; a->read_data_is_posix_read = 0; a->read_data_requested = 0; a->data_start_node = a->client.cursor; /* EOF always wins; otherwise return the worst error. */ return (r2 < r1 || r2 == ARCHIVE_EOF) ? r2 : r1; } int _archive_read_next_header(struct archive *_a, struct archive_entry **entryp) { int ret; struct archive_read *a = (struct archive_read *)_a; *entryp = NULL; ret = _archive_read_next_header2(_a, a->entry); *entryp = a->entry; return ret; } /* * Allow each registered format to bid on whether it wants to handle * the next entry. Return index of winning bidder. */ static int choose_format(struct archive_read *a) { int slots; int i; int bid, best_bid; int best_bid_slot; slots = sizeof(a->formats) / sizeof(a->formats[0]); best_bid = -1; best_bid_slot = -1; /* Set up a->format for convenience of bidders. */ a->format = &(a->formats[0]); for (i = 0; i < slots; i++, a->format++) { if (a->format->bid) { bid = (a->format->bid)(a, best_bid); if (bid == ARCHIVE_FATAL) return (ARCHIVE_FATAL); if (a->filter->position != 0) __archive_read_seek(a, 0, SEEK_SET); if ((bid > best_bid) || (best_bid_slot < 0)) { best_bid = bid; best_bid_slot = i; } } } /* * There were no bidders; this is a serious programmer error * and demands a quick and definitive abort. */ if (best_bid_slot < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "No formats registered"); return (ARCHIVE_FATAL); } /* * There were bidders, but no non-zero bids; this means we * can't support this stream. */ if (best_bid < 1) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unrecognized archive format"); return (ARCHIVE_FATAL); } return (best_bid_slot); } /* * Return the file offset (within the uncompressed data stream) where * the last header started. */ int64_t archive_read_header_position(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_ANY, "archive_read_header_position"); return (a->header_position); } /* * Read data from an archive entry, using a read(2)-style interface. * This is a convenience routine that just calls * archive_read_data_block and copies the results into the client * buffer, filling any gaps with zero bytes. Clients using this * API can be completely ignorant of sparse-file issues; sparse files * will simply be padded with nulls. * * DO NOT intermingle calls to this function and archive_read_data_block * to read a single entry body. 
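 * A minimal usage sketch (consume() is a stand-in for whatever the client
 * does with the bytes; error handling elided, 'a' is the struct archive
 * handle):
 *
 *	char buf[16384];
 *	ssize_t n;
 *	while ((n = archive_read_data(a, buf, sizeof(buf))) > 0)
 *		consume(buf, (size_t)n);
 *
 * A return of zero marks the end of the entry; a negative return is one
 * of the ARCHIVE_* error codes.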
*/ ssize_t archive_read_data(struct archive *_a, void *buff, size_t s) { struct archive_read *a = (struct archive_read *)_a; char *dest; const void *read_buf; size_t bytes_read; size_t len; int r; bytes_read = 0; dest = (char *)buff; while (s > 0) { if (a->read_data_remaining == 0) { read_buf = a->read_data_block; a->read_data_is_posix_read = 1; a->read_data_requested = s; r = _archive_read_data_block(&a->archive, &read_buf, &a->read_data_remaining, &a->read_data_offset); a->read_data_block = read_buf; if (r == ARCHIVE_EOF) return (bytes_read); /* * Error codes are all negative, so the status * return here cannot be confused with a valid * byte count. (ARCHIVE_OK is zero.) */ if (r < ARCHIVE_OK) return (r); } if (a->read_data_offset < a->read_data_output_offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Encountered out-of-order sparse blocks"); return (ARCHIVE_RETRY); } /* Compute the amount of zero padding needed. */ if (a->read_data_output_offset + (int64_t)s < a->read_data_offset) { len = s; } else if (a->read_data_output_offset < a->read_data_offset) { len = (size_t)(a->read_data_offset - a->read_data_output_offset); } else len = 0; /* Add zeroes. */ memset(dest, 0, len); s -= len; a->read_data_output_offset += len; dest += len; bytes_read += len; /* Copy data if there is any space left. */ if (s > 0) { len = a->read_data_remaining; if (len > s) len = s; memcpy(dest, a->read_data_block, len); s -= len; a->read_data_block += len; a->read_data_remaining -= len; a->read_data_output_offset += len; a->read_data_offset += len; dest += len; bytes_read += len; } } a->read_data_is_posix_read = 0; a->read_data_requested = 0; return (bytes_read); } /* * Skip over all remaining data in this entry. */ int archive_read_data_skip(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; int r; const void *buff; size_t size; int64_t offset; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA, "archive_read_data_skip"); if (a->format->read_data_skip != NULL) r = (a->format->read_data_skip)(a); else { while ((r = archive_read_data_block(&a->archive, &buff, &size, &offset)) == ARCHIVE_OK) ; } if (r == ARCHIVE_EOF) r = ARCHIVE_OK; a->archive.state = ARCHIVE_STATE_HEADER; return (r); } int64_t archive_seek_data(struct archive *_a, int64_t offset, int whence) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA, "archive_seek_data_block"); if (a->format->seek_data == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "Internal error: " "No format_seek_data_block function registered"); return (ARCHIVE_FATAL); } return (a->format->seek_data)(a, offset, whence); } /* * Read the next block of entry data from the archive. * This is a zero-copy interface; the client receives a pointer, * size, and file offset of the next available block of data. * * Returns ARCHIVE_OK if the operation is successful, ARCHIVE_EOF if * the end of entry is encountered. 
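 * A minimal zero-copy consumer loop looks like this (sketch only;
 * write_out() is a stand-in for client code that honors the sparse
 * offset):
 *
 *	const void *buff;
 *	size_t size;
 *	int64_t offset;
 *	int r;
 *	while ((r = archive_read_data_block(a, &buff, &size, &offset))
 *	    == ARCHIVE_OK)
 *		write_out(buff, size, offset);
 *	if (r != ARCHIVE_EOF)
 *		... handle error ...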
*/ static int _archive_read_data_block(struct archive *_a, const void **buff, size_t *size, int64_t *offset) { struct archive_read *a = (struct archive_read *)_a; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA, "archive_read_data_block"); if (a->format->read_data == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "Internal error: " "No format_read_data_block function registered"); return (ARCHIVE_FATAL); } return (a->format->read_data)(a, buff, size, offset); } int __archive_read_close_filters(struct archive_read *a) { struct archive_read_filter *f = a->filter; int r = ARCHIVE_OK; /* Close each filter in the pipeline. */ while (f != NULL) { struct archive_read_filter *t = f->upstream; if (!f->closed && f->close != NULL) { int r1 = (f->close)(f); f->closed = 1; if (r1 < r) r = r1; } free(f->buffer); f->buffer = NULL; f = t; } return r; } void __archive_read_free_filters(struct archive_read *a) { while (a->filter != NULL) { struct archive_read_filter *t = a->filter->upstream; free(a->filter); a->filter = t; } } /* * return the count of # of filters in use */ static int _archive_filter_count(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct archive_read_filter *p = a->filter; int count = 0; while(p) { count++; p = p->upstream; } return count; } /* * Close the file and all I/O. */ static int _archive_read_close(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; int r = ARCHIVE_OK, r1 = ARCHIVE_OK; archive_check_magic(&a->archive, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_close"); if (a->archive.state == ARCHIVE_STATE_CLOSED) return (ARCHIVE_OK); archive_clear_error(&a->archive); a->archive.state = ARCHIVE_STATE_CLOSED; /* TODO: Clean up the formatters. */ /* Release the filter objects. */ r1 = __archive_read_close_filters(a); if (r1 < r) r = r1; return (r); } /* * Release memory and other resources. */ static int _archive_read_free(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; int i, n; int slots; int r = ARCHIVE_OK; if (_a == NULL) return (ARCHIVE_OK); archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_free"); if (a->archive.state != ARCHIVE_STATE_CLOSED && a->archive.state != ARCHIVE_STATE_FATAL) r = archive_read_close(&a->archive); /* Call cleanup functions registered by optional components. */ if (a->cleanup_archive_extract != NULL) r = (a->cleanup_archive_extract)(a); /* Cleanup format-specific data. */ slots = sizeof(a->formats) / sizeof(a->formats[0]); for (i = 0; i < slots; i++) { a->format = &(a->formats[i]); if (a->formats[i].cleanup) (a->formats[i].cleanup)(a); } /* Free the filters */ __archive_read_free_filters(a); /* Release the bidder objects. */ n = sizeof(a->bidders)/sizeof(a->bidders[0]); for (i = 0; i < n; i++) { if (a->bidders[i].free != NULL) { int r1 = (a->bidders[i].free)(&a->bidders[i]); if (r1 < r) r = r1; } } archive_string_free(&a->archive.error_string); if (a->entry) archive_entry_free(a->entry); a->archive.magic = 0; __archive_clean(&a->archive); free(a->client.dataset); free(a); return (r); } static struct archive_read_filter * get_filter(struct archive *_a, int n) { struct archive_read *a = (struct archive_read *)_a; struct archive_read_filter *f = a->filter; /* We use n == -1 for 'the last filter', which is always the * client proxy. 
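 * Filter slot 0 is the filter closest to the format handler (for a
 * gzipped tar that would be the "gzip" decompressor), and each higher
 * number moves one step toward the client; the last entry is always the
 * "none" client proxy set up in archive_read_open1().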
*/ if (n == -1 && f != NULL) { struct archive_read_filter *last = f; f = f->upstream; while (f != NULL) { last = f; f = f->upstream; } return (last); } if (n < 0) return NULL; while (n > 0 && f != NULL) { f = f->upstream; --n; } return (f); } static int _archive_filter_code(struct archive *_a, int n) { struct archive_read_filter *f = get_filter(_a, n); return f == NULL ? -1 : f->code; } static const char * _archive_filter_name(struct archive *_a, int n) { struct archive_read_filter *f = get_filter(_a, n); return f == NULL ? NULL : f->name; } static int64_t _archive_filter_bytes(struct archive *_a, int n) { struct archive_read_filter *f = get_filter(_a, n); return f == NULL ? -1 : f->position; } /* * Used internally by read format handlers to register their bid and * initialization functions. */ int __archive_read_register_format(struct archive_read *a, void *format_data, const char *name, int (*bid)(struct archive_read *, int), int (*options)(struct archive_read *, const char *, const char *), int (*read_header)(struct archive_read *, struct archive_entry *), int (*read_data)(struct archive_read *, const void **, size_t *, int64_t *), int (*read_data_skip)(struct archive_read *), int64_t (*seek_data)(struct archive_read *, int64_t, int), int (*cleanup)(struct archive_read *)) { int i, number_slots; archive_check_magic(&a->archive, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "__archive_read_register_format"); number_slots = sizeof(a->formats) / sizeof(a->formats[0]); for (i = 0; i < number_slots; i++) { if (a->formats[i].bid == bid) return (ARCHIVE_WARN); /* We've already installed */ if (a->formats[i].bid == NULL) { a->formats[i].bid = bid; a->formats[i].options = options; a->formats[i].read_header = read_header; a->formats[i].read_data = read_data; a->formats[i].read_data_skip = read_data_skip; a->formats[i].seek_data = seek_data; a->formats[i].cleanup = cleanup; a->formats[i].data = format_data; a->formats[i].name = name; return (ARCHIVE_OK); } } archive_set_error(&a->archive, ENOMEM, "Not enough slots for format registration"); return (ARCHIVE_FATAL); } /* * Used internally by decompression routines to register their bid and * initialization functions. */ int __archive_read_get_bidder(struct archive_read *a, struct archive_read_filter_bidder **bidder) { int i, number_slots; number_slots = sizeof(a->bidders) / sizeof(a->bidders[0]); for (i = 0; i < number_slots; i++) { if (a->bidders[i].bid == NULL) { memset(a->bidders + i, 0, sizeof(a->bidders[0])); *bidder = (a->bidders + i); return (ARCHIVE_OK); } } archive_set_error(&a->archive, ENOMEM, "Not enough slots for filter registration"); return (ARCHIVE_FATAL); } /* * The next section implements the peek/consume internal I/O * system used by archive readers. This system allows simple * read-ahead for consumers while preserving zero-copy operation * most of the time. * * The two key operations: * * The read-ahead function returns a pointer to a block of data * that satisfies a minimum request. * * The consume function advances the file pointer. * * In the ideal case, filters generate blocks of data * and __archive_read_ahead() just returns pointers directly into * those blocks. Then __archive_read_consume() just bumps those * pointers. Only if your request would span blocks does the I/O * layer use a copy buffer to provide you with a contiguous block of * data. * * A couple of useful idioms: * * "I just want some data." Ask for 1 byte and pay attention to * the "number of bytes available" from __archive_read_ahead(). 
* Consume whatever you actually use. * * "I want to output a large block of data." As above, ask for 1 byte, * emit all that's available (up to whatever limit you have), consume * it all, then repeat until you're done. This effectively means that * you're passing along the blocks that came from your provider. * * "I want to peek ahead by a large amount." Ask for 4k or so, then * double and repeat until you get an error or have enough. Note * that the I/O layer will likely end up expanding its copy buffer * to fit your request, so use this technique cautiously. This * technique is used, for example, by some of the format tasting * code that has uncertain look-ahead needs. */ /* * Looks ahead in the input stream: * * If 'avail' pointer is provided, that returns number of bytes available * in the current buffer, which may be much larger than requested. * * If end-of-file, *avail gets set to zero. * * If error, *avail gets error code. * * If request can be met, returns pointer to data. * * If minimum request cannot be met, returns NULL. * * Note: If you just want "some data", ask for 1 byte and pay attention * to *avail, which will have the actual amount available. If you * know exactly how many bytes you need, just ask for that and treat * a NULL return as an error. * * Important: This does NOT move the file pointer. See * __archive_read_consume() below. */ const void * __archive_read_ahead(struct archive_read *a, size_t min, ssize_t *avail) { return (__archive_read_filter_ahead(a->filter, min, avail)); } const void * __archive_read_filter_ahead(struct archive_read_filter *filter, size_t min, ssize_t *avail) { ssize_t bytes_read; size_t tocopy; if (filter->fatal) { if (avail) *avail = ARCHIVE_FATAL; return (NULL); } /* * Keep pulling more data until we can satisfy the request. */ for (;;) { /* * If we can satisfy from the copy buffer (and the * copy buffer isn't empty), we're done. In particular, * note that min == 0 is a perfectly well-defined * request. */ if (filter->avail >= min && filter->avail > 0) { if (avail != NULL) *avail = filter->avail; return (filter->next); } /* * We can satisfy directly from client buffer if everything * currently in the copy buffer is still in the client buffer. */ if (filter->client_total >= filter->client_avail + filter->avail && filter->client_avail + filter->avail >= min) { /* "Roll back" to client buffer. */ filter->client_avail += filter->avail; filter->client_next -= filter->avail; /* Copy buffer is now empty. */ filter->avail = 0; filter->next = filter->buffer; /* Return data from client buffer. */ if (avail != NULL) *avail = filter->client_avail; return (filter->client_next); } /* Move data forward in copy buffer if necessary. */ if (filter->next > filter->buffer && filter->next + min > filter->buffer + filter->buffer_size) { if (filter->avail > 0) memmove(filter->buffer, filter->next, filter->avail); filter->next = filter->buffer; } /* If we've used up the client data, get more. */ if (filter->client_avail <= 0) { if (filter->end_of_file) { if (avail != NULL) *avail = 0; return (NULL); } bytes_read = (filter->read)(filter, &filter->client_buff); if (bytes_read < 0) { /* Read error. 
*/ filter->client_total = filter->client_avail = 0; filter->client_next = filter->client_buff = NULL; filter->fatal = 1; if (avail != NULL) *avail = ARCHIVE_FATAL; return (NULL); } if (bytes_read == 0) { /* Check for another client object first */ if (filter->archive->client.cursor != filter->archive->client.nodes - 1) { if (client_switch_proxy(filter, filter->archive->client.cursor + 1) == ARCHIVE_OK) continue; } /* Premature end-of-file. */ filter->client_total = filter->client_avail = 0; filter->client_next = filter->client_buff = NULL; filter->end_of_file = 1; /* Return whatever we do have. */ if (avail != NULL) *avail = filter->avail; return (NULL); } filter->client_total = bytes_read; filter->client_avail = filter->client_total; filter->client_next = filter->client_buff; } else { /* * We can't satisfy the request from the copy * buffer or the existing client data, so we * need to copy more client data over to the * copy buffer. */ /* Ensure the buffer is big enough. */ if (min > filter->buffer_size) { size_t s, t; char *p; /* Double the buffer; watch for overflow. */ s = t = filter->buffer_size; if (s == 0) s = min; while (s < min) { t *= 2; if (t <= s) { /* Integer overflow! */ archive_set_error( &filter->archive->archive, ENOMEM, "Unable to allocate copy" " buffer"); filter->fatal = 1; if (avail != NULL) *avail = ARCHIVE_FATAL; return (NULL); } s = t; } /* Now s >= min, so allocate a new buffer. */ p = (char *)malloc(s); if (p == NULL) { archive_set_error( &filter->archive->archive, ENOMEM, "Unable to allocate copy buffer"); filter->fatal = 1; if (avail != NULL) *avail = ARCHIVE_FATAL; return (NULL); } /* Move data into newly-enlarged buffer. */ if (filter->avail > 0) memmove(p, filter->next, filter->avail); free(filter->buffer); filter->next = filter->buffer = p; filter->buffer_size = s; } /* We can add client data to copy buffer. */ /* First estimate: copy to fill rest of buffer. */ tocopy = (filter->buffer + filter->buffer_size) - (filter->next + filter->avail); /* Don't waste time buffering more than we need to. */ if (tocopy + filter->avail > min) tocopy = min - filter->avail; /* Don't copy more than is available. */ if (tocopy > filter->client_avail) tocopy = filter->client_avail; memcpy(filter->next + filter->avail, filter->client_next, tocopy); /* Remove this data from client buffer. */ filter->client_next += tocopy; filter->client_avail -= tocopy; /* add it to copy buffer. */ filter->avail += tocopy; } } } /* * Move the file pointer forward. */ int64_t __archive_read_consume(struct archive_read *a, int64_t request) { return (__archive_read_filter_consume(a->filter, request)); } int64_t __archive_read_filter_consume(struct archive_read_filter * filter, int64_t request) { int64_t skipped; if (request < 0) return ARCHIVE_FATAL; if (request == 0) return 0; skipped = advance_file_pointer(filter, request); if (skipped == request) return (skipped); /* We hit EOF before we satisfied the skip request. */ if (skipped < 0) /* Map error code to 0 for error message below. */ skipped = 0; archive_set_error(&filter->archive->archive, ARCHIVE_ERRNO_MISC, "Truncated input file (needed %jd bytes, only %jd available)", (intmax_t)request, (intmax_t)skipped); return (ARCHIVE_FATAL); } /* * Advance the file pointer by the amount requested. * Returns the amount actually advanced, which may be less than the * request if EOF is encountered first. * Returns a negative value if there's an I/O error. 
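 * Data is consumed from the cheapest source first: anything already in
 * the copy buffer, then anything left in the client buffer, then the
 * client skip callback (when one is registered), and finally ordinary
 * reads whose contents are simply discarded.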
*/ static int64_t advance_file_pointer(struct archive_read_filter *filter, int64_t request) { int64_t bytes_skipped, total_bytes_skipped = 0; ssize_t bytes_read; size_t min; if (filter->fatal) return (-1); /* Use up the copy buffer first. */ if (filter->avail > 0) { min = (size_t)minimum(request, (int64_t)filter->avail); filter->next += min; filter->avail -= min; request -= min; filter->position += min; total_bytes_skipped += min; } /* Then use up the client buffer. */ if (filter->client_avail > 0) { min = (size_t)minimum(request, (int64_t)filter->client_avail); filter->client_next += min; filter->client_avail -= min; request -= min; filter->position += min; total_bytes_skipped += min; } if (request == 0) return (total_bytes_skipped); /* If there's an optimized skip function, use it. */ if (filter->skip != NULL) { bytes_skipped = (filter->skip)(filter, request); if (bytes_skipped < 0) { /* error */ filter->fatal = 1; return (bytes_skipped); } filter->position += bytes_skipped; total_bytes_skipped += bytes_skipped; request -= bytes_skipped; if (request == 0) return (total_bytes_skipped); } /* Use ordinary reads as necessary to complete the request. */ for (;;) { bytes_read = (filter->read)(filter, &filter->client_buff); if (bytes_read < 0) { filter->client_buff = NULL; filter->fatal = 1; return (bytes_read); } if (bytes_read == 0) { if (filter->archive->client.cursor != filter->archive->client.nodes - 1) { if (client_switch_proxy(filter, filter->archive->client.cursor + 1) == ARCHIVE_OK) continue; } filter->client_buff = NULL; filter->end_of_file = 1; return (total_bytes_skipped); } if (bytes_read >= request) { filter->client_next = ((const char *)filter->client_buff) + request; filter->client_avail = (size_t)(bytes_read - request); filter->client_total = bytes_read; total_bytes_skipped += request; filter->position += request; return (total_bytes_skipped); } filter->position += bytes_read; total_bytes_skipped += bytes_read; request -= bytes_read; } } /** * Returns ARCHIVE_FAILED if seeking isn't supported. 
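 * Offsets are interpreted against the concatenated multi-node stream:
 * each node's begin_position/total_size is discovered lazily (by seeking
 * that node to its end the first time it is needed), and a successful
 * seek discards the read-ahead buffers so stale data is never returned.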
*/ int64_t __archive_read_seek(struct archive_read *a, int64_t offset, int whence) { return __archive_read_filter_seek(a->filter, offset, whence); } int64_t __archive_read_filter_seek(struct archive_read_filter *filter, int64_t offset, int whence) { struct archive_read_client *client; int64_t r; unsigned int cursor; if (filter->closed || filter->fatal) return (ARCHIVE_FATAL); if (filter->seek == NULL) return (ARCHIVE_FAILED); client = &(filter->archive->client); switch (whence) { case SEEK_CUR: /* Adjust the offset and use SEEK_SET instead */ offset += filter->position; case SEEK_SET: cursor = 0; while (1) { if (client->dataset[cursor].begin_position < 0 || client->dataset[cursor].total_size < 0 || client->dataset[cursor].begin_position + client->dataset[cursor].total_size - 1 > offset || cursor + 1 >= client->nodes) break; r = client->dataset[cursor].begin_position + client->dataset[cursor].total_size; client->dataset[++cursor].begin_position = r; } while (1) { r = client_switch_proxy(filter, cursor); if (r != ARCHIVE_OK) return r; if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0) return r; client->dataset[cursor].total_size = r; if (client->dataset[cursor].begin_position + client->dataset[cursor].total_size - 1 > offset || cursor + 1 >= client->nodes) break; r = client->dataset[cursor].begin_position + client->dataset[cursor].total_size; client->dataset[++cursor].begin_position = r; } offset -= client->dataset[cursor].begin_position; if (offset < 0) offset = 0; else if (offset > client->dataset[cursor].total_size - 1) offset = client->dataset[cursor].total_size - 1; if ((r = client_seek_proxy(filter, offset, SEEK_SET)) < 0) return r; break; case SEEK_END: cursor = 0; while (1) { if (client->dataset[cursor].begin_position < 0 || client->dataset[cursor].total_size < 0 || cursor + 1 >= client->nodes) break; r = client->dataset[cursor].begin_position + client->dataset[cursor].total_size; client->dataset[++cursor].begin_position = r; } while (1) { r = client_switch_proxy(filter, cursor); if (r != ARCHIVE_OK) return r; if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0) return r; client->dataset[cursor].total_size = r; r = client->dataset[cursor].begin_position + client->dataset[cursor].total_size; if (cursor + 1 >= client->nodes) break; client->dataset[++cursor].begin_position = r; } while (1) { if (r + offset >= client->dataset[cursor].begin_position) break; offset += client->dataset[cursor].total_size; if (cursor == 0) break; cursor--; r = client->dataset[cursor].begin_position + client->dataset[cursor].total_size; } offset = (r + offset) - client->dataset[cursor].begin_position; if ((r = client_switch_proxy(filter, cursor)) != ARCHIVE_OK) return r; r = client_seek_proxy(filter, offset, SEEK_SET); if (r < ARCHIVE_OK) return r; break; default: return (ARCHIVE_FATAL); } r += client->dataset[cursor].begin_position; if (r >= 0) { /* * Ouch. Clearing the buffer like this hurts, especially * at bid time. A lot of our efficiency at bid time comes * from having bidders reuse the data we've already read. * * TODO: If the seek request is in data we already * have, then don't call the seek callback. * * TODO: Zip seeks to end-of-file at bid time. If * other formats also start doing this, we may need to * find a way for clients to fudge the seek offset to * a block boundary. * * Hmmm... If whence was SEEK_END, we know the file * size is (r - offset). Can we use that to simplify * the TODO items above? 
*/ filter->avail = filter->client_avail = 0; filter->next = filter->buffer; filter->position = r; filter->end_of_file = 0; } return r; } Index: projects/release-pkg/contrib/libarchive/libarchive =================================================================== --- projects/release-pkg/contrib/libarchive/libarchive (revision 295925) +++ projects/release-pkg/contrib/libarchive/libarchive (revision 295926) Property changes on: projects/release-pkg/contrib/libarchive/libarchive ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,2 ## Merged /head/contrib/libarchive/libarchive:r289091-289384,293171-295925 Merged /vendor/libarchive/dist/libarchive:r295913 Index: projects/release-pkg/contrib/libarchive =================================================================== --- projects/release-pkg/contrib/libarchive (revision 295925) +++ projects/release-pkg/contrib/libarchive (revision 295926) Property changes on: projects/release-pkg/contrib/libarchive ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,2 ## Merged /head/contrib/libarchive:r289091-289384,293171-295925 Merged /vendor/libarchive/dist:r295913 Index: projects/release-pkg/lib/libc/db/hash/hash.c =================================================================== --- projects/release-pkg/lib/libc/db/hash/hash.c (revision 295925) +++ projects/release-pkg/lib/libc/db/hash/hash.c (revision 295926) @@ -1,963 +1,967 @@ /*- * Copyright (c) 1990, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Margo Seltzer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(LIBC_SCCS) && !defined(lint) static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94"; #endif /* LIBC_SCCS and not lint */ #include __FBSDID("$FreeBSD$"); #include "namespace.h" #include #include #include #include #include #include #include #include #ifdef DEBUG #include #endif #include "un-namespace.h" #include #include "hash.h" #include "page.h" #include "extern.h" static int alloc_segs(HTAB *, int); static int flush_meta(HTAB *); static int hash_access(HTAB *, ACTION, DBT *, DBT *); static int hash_close(DB *); static int hash_delete(const DB *, const DBT *, u_int32_t); static int hash_fd(const DB *); static int hash_get(const DB *, const DBT *, DBT *, u_int32_t); static int hash_put(const DB *, DBT *, const DBT *, u_int32_t); static void *hash_realloc(SEGMENT **, int, int); static int hash_seq(const DB *, DBT *, DBT *, u_int32_t); static int hash_sync(const DB *, u_int32_t); static int hdestroy(HTAB *); static HTAB *init_hash(HTAB *, const char *, const HASHINFO *); static int init_htab(HTAB *, int); #if BYTE_ORDER == LITTLE_ENDIAN static void swap_header(HTAB *); static void swap_header_copy(HASHHDR *, HASHHDR *); #endif /* Fast arithmetic, relying on powers of 2, */ #define MOD(x, y) ((x) & ((y) - 1)) #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; } /* Return values */ #define SUCCESS (0) #define ERROR (-1) #define ABNORMAL (1) #ifdef HASH_STATISTICS int hash_accesses, hash_collisions, hash_expansions, hash_overflows; #endif /************************** INTERFACE ROUTINES ***************************/ /* OPEN/CLOSE */ /* ARGSUSED */ DB * __hash_open(const char *file, int flags, int mode, const HASHINFO *info, /* Special directives for create */ int dflags) { HTAB *hashp; struct stat statbuf; DB *dbp; int bpages, hdrsize, new_table, nsegs, save_errno; if ((flags & O_ACCMODE) == O_WRONLY) { errno = EINVAL; return (NULL); } if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB)))) return (NULL); hashp->fp = -1; /* * Even if user wants write only, we need to be able to read * the actual file, so we need to open it read/write. But, the * field in the hashp structure needs to be accurate so that * we can check accesses. */ hashp->flags = flags; if (file) { if ((hashp->fp = _open(file, flags | O_CLOEXEC, mode)) == -1) RETURN_ERROR(errno, error0); new_table = _fstat(hashp->fp, &statbuf) == 0 && statbuf.st_size == 0 && (flags & O_ACCMODE) != O_RDONLY; } else new_table = 1; if (new_table) { if (!(hashp = init_hash(hashp, file, info))) RETURN_ERROR(errno, error1); } else { /* Table already exists */ if (info && info->hash) hashp->hash = info->hash; else hashp->hash = __default_hash; hdrsize = _read(hashp->fp, &hashp->hdr, sizeof(HASHHDR)); #if BYTE_ORDER == LITTLE_ENDIAN swap_header(hashp); #endif if (hdrsize == -1) RETURN_ERROR(errno, error1); if (hdrsize != sizeof(HASHHDR)) RETURN_ERROR(EFTYPE, error1); /* Verify file type, versions and hash function */ if (hashp->MAGIC != HASHMAGIC) RETURN_ERROR(EFTYPE, error1); #define OLDHASHVERSION 1 if (hashp->VERSION != HASHVERSION && hashp->VERSION != OLDHASHVERSION) RETURN_ERROR(EFTYPE, error1); if ((int32_t)hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY) RETURN_ERROR(EFTYPE, error1); /* * Figure out how many segments we need. Max_Bucket is the * maximum bucket number, so the number of buckets is * max_bucket + 1. */ nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) / hashp->SGSIZE; if (alloc_segs(hashp, nsegs)) /* * If alloc_segs fails, table will have been destroyed * and errno will have been set. 
*/ return (NULL); /* Read in bitmaps */ bpages = (hashp->SPARES[hashp->OVFL_POINT] + (hashp->BSIZE << BYTE_SHIFT) - 1) >> (hashp->BSHIFT + BYTE_SHIFT); hashp->nmaps = bpages; (void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *)); } /* Initialize Buffer Manager */ if (info && info->cachesize) __buf_init(hashp, info->cachesize); else __buf_init(hashp, DEF_BUFSIZE); hashp->new_file = new_table; hashp->save_file = file && (hashp->flags & O_RDWR); hashp->cbucket = -1; if (!(dbp = (DB *)malloc(sizeof(DB)))) { save_errno = errno; hdestroy(hashp); errno = save_errno; return (NULL); } dbp->internal = hashp; dbp->close = hash_close; dbp->del = hash_delete; dbp->fd = hash_fd; dbp->get = hash_get; dbp->put = hash_put; dbp->seq = hash_seq; dbp->sync = hash_sync; dbp->type = DB_HASH; #ifdef DEBUG (void)fprintf(stderr, "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n", "init_htab:", "TABLE POINTER ", hashp, "BUCKET SIZE ", hashp->BSIZE, "BUCKET SHIFT ", hashp->BSHIFT, "DIRECTORY SIZE ", hashp->DSIZE, "SEGMENT SIZE ", hashp->SGSIZE, "SEGMENT SHIFT ", hashp->SSHIFT, "FILL FACTOR ", hashp->FFACTOR, "MAX BUCKET ", hashp->MAX_BUCKET, "OVFL POINT ", hashp->OVFL_POINT, "LAST FREED ", hashp->LAST_FREED, "HIGH MASK ", hashp->HIGH_MASK, "LOW MASK ", hashp->LOW_MASK, "NSEGS ", hashp->nsegs, "NKEYS ", hashp->NKEYS); #endif #ifdef HASH_STATISTICS hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0; #endif return (dbp); error1: if (hashp != NULL) (void)_close(hashp->fp); error0: free(hashp); errno = save_errno; return (NULL); } static int hash_close(DB *dbp) { HTAB *hashp; int retval; if (!dbp) return (ERROR); hashp = (HTAB *)dbp->internal; retval = hdestroy(hashp); free(dbp); return (retval); } static int hash_fd(const DB *dbp) { HTAB *hashp; if (!dbp) return (ERROR); hashp = (HTAB *)dbp->internal; if (hashp->fp == -1) { errno = ENOENT; return (-1); } return (hashp->fp); } /************************** LOCAL CREATION ROUTINES **********************/ static HTAB * init_hash(HTAB *hashp, const char *file, const HASHINFO *info) { struct stat statbuf; int nelem; nelem = 1; hashp->NKEYS = 0; hashp->LORDER = BYTE_ORDER; hashp->BSIZE = DEF_BUCKET_SIZE; hashp->BSHIFT = DEF_BUCKET_SHIFT; hashp->SGSIZE = DEF_SEGSIZE; hashp->SSHIFT = DEF_SEGSIZE_SHIFT; hashp->DSIZE = DEF_DIRSIZE; hashp->FFACTOR = DEF_FFACTOR; hashp->hash = __default_hash; memset(hashp->SPARES, 0, sizeof(hashp->SPARES)); memset(hashp->BITMAPS, 0, sizeof (hashp->BITMAPS)); /* Fix bucket size to be optimal for file system */ if (file != NULL) { if (stat(file, &statbuf)) return (NULL); hashp->BSIZE = statbuf.st_blksize; if (hashp->BSIZE > MAX_BSIZE) hashp->BSIZE = MAX_BSIZE; hashp->BSHIFT = __log2(hashp->BSIZE); } if (info) { if (info->bsize) { /* Round pagesize up to power of 2 */ hashp->BSHIFT = __log2(info->bsize); hashp->BSIZE = 1 << hashp->BSHIFT; if (hashp->BSIZE > MAX_BSIZE) { errno = EINVAL; return (NULL); } } if (info->ffactor) hashp->FFACTOR = info->ffactor; if (info->hash) hashp->hash = info->hash; if (info->nelem) nelem = info->nelem; if (info->lorder) { if (info->lorder != BIG_ENDIAN && info->lorder != LITTLE_ENDIAN) { errno = EINVAL; return (NULL); } hashp->LORDER = info->lorder; } } /* init_htab should destroy the table and set errno if it fails */ if (init_htab(hashp, nelem)) return (NULL); else return (hashp); } /* * This calls alloc_segs which may run out of memory. Alloc_segs will destroy * the table and set errno, so we just pass the error information along. 
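 * For instance, nelem = 1000 with a fill factor of 8 gives
 * (1000 - 1) / 8 + 1 = 125 desired buckets, which is rounded up to the
 * next power of two, so 128 buckets (l2 = 7) are laid out initially.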
* * Returns 0 on No Error */ static int init_htab(HTAB *hashp, int nelem) { int nbuckets, nsegs, l2; /* * Divide number of elements by the fill factor and determine a * desired number of buckets. Allocate space for the next greater * power of two number of buckets. */ nelem = (nelem - 1) / hashp->FFACTOR + 1; l2 = __log2(MAX(nelem, 2)); nbuckets = 1 << l2; hashp->SPARES[l2] = l2 + 1; hashp->SPARES[l2 + 1] = l2 + 1; hashp->OVFL_POINT = l2; hashp->LAST_FREED = 2; /* First bitmap page is at: splitpoint l2 page offset 1 */ if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0)) return (-1); hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1; hashp->HIGH_MASK = (nbuckets << 1) - 1; hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >> hashp->BSHIFT) + 1; nsegs = (nbuckets - 1) / hashp->SGSIZE + 1; nsegs = 1 << __log2(nsegs); if (nsegs > hashp->DSIZE) hashp->DSIZE = nsegs; return (alloc_segs(hashp, nsegs)); } /********************** DESTROY/CLOSE ROUTINES ************************/ /* * Flushes any changes to the file if necessary and destroys the hashp * structure, freeing all allocated space. */ static int hdestroy(HTAB *hashp) { int i, save_errno; save_errno = 0; #ifdef HASH_STATISTICS (void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n", hash_accesses, hash_collisions); (void)fprintf(stderr, "hdestroy: expansions %ld\n", hash_expansions); (void)fprintf(stderr, "hdestroy: overflows %ld\n", hash_overflows); (void)fprintf(stderr, "keys %ld maxp %d segmentcount %d\n", hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs); for (i = 0; i < NCACHED; i++) (void)fprintf(stderr, "spares[%d] = %d\n", i, hashp->SPARES[i]); #endif /* * Call on buffer manager to free buffers, and if required, * write them to disk. */ if (__buf_free(hashp, 1, hashp->save_file)) save_errno = errno; if (hashp->dir) { free(*hashp->dir); /* Free initial segments */ /* Free extra segments */ while (hashp->exsegs--) free(hashp->dir[--hashp->nsegs]); free(hashp->dir); } if (flush_meta(hashp) && !save_errno) save_errno = errno; /* Free Bigmaps */ for (i = 0; i < hashp->nmaps; i++) if (hashp->mapp[i]) free(hashp->mapp[i]); if (hashp->tmp_key) free(hashp->tmp_key); if (hashp->tmp_buf) free(hashp->tmp_buf); - if (hashp->fp != -1) + if (hashp->fp != -1) { + (void)_fsync(hashp->fp); (void)_close(hashp->fp); + } free(hashp); if (save_errno) { errno = save_errno; return (ERROR); } return (SUCCESS); } /* * Write modified pages to disk * * Returns: * 0 == OK * -1 ERROR */ static int hash_sync(const DB *dbp, u_int32_t flags) { HTAB *hashp; if (flags != 0) { errno = EINVAL; return (ERROR); } if (!dbp) return (ERROR); hashp = (HTAB *)dbp->internal; if (!hashp->save_file) return (0); if (__buf_free(hashp, 0, 1) || flush_meta(hashp)) + return (ERROR); + if (hashp->fp != -1 && _fsync(hashp->fp) != 0) return (ERROR); hashp->new_file = 0; return (0); } /* * Returns: * 0 == OK * -1 indicates that errno should be set */ static int flush_meta(HTAB *hashp) { HASHHDR *whdrp; #if BYTE_ORDER == LITTLE_ENDIAN HASHHDR whdr; #endif int fp, i, wsize; if (!hashp->save_file) return (0); hashp->MAGIC = HASHMAGIC; hashp->VERSION = HASHVERSION; hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY)); fp = hashp->fp; whdrp = &hashp->hdr; #if BYTE_ORDER == LITTLE_ENDIAN whdrp = &whdr; swap_header_copy(&hashp->hdr, whdrp); #endif if ((wsize = pwrite(fp, whdrp, sizeof(HASHHDR), (off_t)0)) == -1) return (-1); else if (wsize != sizeof(HASHHDR)) { errno = EFTYPE; hashp->error = errno; return (-1); } for (i = 0; i < NCACHED; i++) if (hashp->mapp[i]) if 
(__put_page(hashp, (char *)hashp->mapp[i], hashp->BITMAPS[i], 0, 1)) return (-1); return (0); } /*******************************SEARCH ROUTINES *****************************/ /* * All the access routines return * * Returns: * 0 on SUCCESS * 1 to indicate an external ERROR (i.e. key not found, etc) * -1 to indicate an internal ERROR (i.e. out of memory, etc) */ static int hash_get(const DB *dbp, const DBT *key, DBT *data, u_int32_t flag) { HTAB *hashp; hashp = (HTAB *)dbp->internal; if (flag) { hashp->error = errno = EINVAL; return (ERROR); } return (hash_access(hashp, HASH_GET, (DBT *)key, data)); } static int hash_put(const DB *dbp, DBT *key, const DBT *data, u_int32_t flag) { HTAB *hashp; hashp = (HTAB *)dbp->internal; if (flag && flag != R_NOOVERWRITE) { hashp->error = errno = EINVAL; return (ERROR); } if ((hashp->flags & O_ACCMODE) == O_RDONLY) { hashp->error = errno = EPERM; return (ERROR); } return (hash_access(hashp, flag == R_NOOVERWRITE ? HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data)); } static int hash_delete(const DB *dbp, const DBT *key, u_int32_t flag) /* Ignored */ { HTAB *hashp; hashp = (HTAB *)dbp->internal; if (flag && flag != R_CURSOR) { hashp->error = errno = EINVAL; return (ERROR); } if ((hashp->flags & O_ACCMODE) == O_RDONLY) { hashp->error = errno = EPERM; return (ERROR); } return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL)); } /* * Assume that hashp has been set in wrapper routine. */ static int hash_access(HTAB *hashp, ACTION action, DBT *key, DBT *val) { BUFHEAD *rbufp; BUFHEAD *bufp, *save_bufp; u_int16_t *bp; int n, ndx, off, size; char *kp; u_int16_t pageno; #ifdef HASH_STATISTICS hash_accesses++; #endif off = hashp->BSIZE; size = key->size; kp = (char *)key->data; rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0); if (!rbufp) return (ERROR); save_bufp = rbufp; /* Pin the bucket chain */ rbufp->flags |= BUF_PIN; for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;) if (bp[1] >= REAL_KEY) { /* Real key/data pair */ if (size == off - *bp && memcmp(kp, rbufp->page + *bp, size) == 0) goto found; off = bp[1]; #ifdef HASH_STATISTICS hash_collisions++; #endif bp += 2; ndx += 2; } else if (bp[1] == OVFLPAGE) { rbufp = __get_buf(hashp, *bp, rbufp, 0); if (!rbufp) { save_bufp->flags &= ~BUF_PIN; return (ERROR); } /* FOR LOOP INIT */ bp = (u_int16_t *)rbufp->page; n = *bp++; ndx = 1; off = hashp->BSIZE; } else if (bp[1] < REAL_KEY) { if ((ndx = __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0) goto found; if (ndx == -2) { bufp = rbufp; if (!(pageno = __find_last_page(hashp, &bufp))) { ndx = 0; rbufp = bufp; break; /* FOR */ } rbufp = __get_buf(hashp, pageno, bufp, 0); if (!rbufp) { save_bufp->flags &= ~BUF_PIN; return (ERROR); } /* FOR LOOP INIT */ bp = (u_int16_t *)rbufp->page; n = *bp++; ndx = 1; off = hashp->BSIZE; } else { save_bufp->flags &= ~BUF_PIN; return (ERROR); } } /* Not found */ switch (action) { case HASH_PUT: case HASH_PUTNEW: if (__addel(hashp, rbufp, key, val)) { save_bufp->flags &= ~BUF_PIN; return (ERROR); } else { save_bufp->flags &= ~BUF_PIN; return (SUCCESS); } case HASH_GET: case HASH_DELETE: default: save_bufp->flags &= ~BUF_PIN; return (ABNORMAL); } found: switch (action) { case HASH_PUTNEW: save_bufp->flags &= ~BUF_PIN; return (ABNORMAL); case HASH_GET: bp = (u_int16_t *)rbufp->page; if (bp[ndx + 1] < REAL_KEY) { if (__big_return(hashp, rbufp, ndx, val, 0)) return (ERROR); } else { val->data = (u_char *)rbufp->page + (int)bp[ndx + 1]; val->size = bp[ndx] - bp[ndx + 1]; } break; case HASH_PUT: if 
((__delpair(hashp, rbufp, ndx)) || (__addel(hashp, rbufp, key, val))) { save_bufp->flags &= ~BUF_PIN; return (ERROR); } break; case HASH_DELETE: if (__delpair(hashp, rbufp, ndx)) return (ERROR); break; default: abort(); } save_bufp->flags &= ~BUF_PIN; return (SUCCESS); } static int hash_seq(const DB *dbp, DBT *key, DBT *data, u_int32_t flag) { u_int32_t bucket; BUFHEAD *bufp; HTAB *hashp; u_int16_t *bp, ndx; hashp = (HTAB *)dbp->internal; if (flag && flag != R_FIRST && flag != R_NEXT) { hashp->error = errno = EINVAL; return (ERROR); } #ifdef HASH_STATISTICS hash_accesses++; #endif if ((hashp->cbucket < 0) || (flag == R_FIRST)) { hashp->cbucket = 0; hashp->cndx = 1; hashp->cpage = NULL; } next_bucket: for (bp = NULL; !bp || !bp[0]; ) { if (!(bufp = hashp->cpage)) { for (bucket = hashp->cbucket; bucket <= hashp->MAX_BUCKET; bucket++, hashp->cndx = 1) { bufp = __get_buf(hashp, bucket, NULL, 0); if (!bufp) return (ERROR); hashp->cpage = bufp; bp = (u_int16_t *)bufp->page; if (bp[0]) break; } hashp->cbucket = bucket; if ((u_int32_t)hashp->cbucket > hashp->MAX_BUCKET) { hashp->cbucket = -1; return (ABNORMAL); } } else { bp = (u_int16_t *)hashp->cpage->page; if (flag == R_NEXT || flag == 0) { hashp->cndx += 2; if (hashp->cndx > bp[0]) { hashp->cpage = NULL; hashp->cbucket++; hashp->cndx = 1; goto next_bucket; } } } #ifdef DEBUG assert(bp); assert(bufp); #endif while (bp[hashp->cndx + 1] == OVFLPAGE) { bufp = hashp->cpage = __get_buf(hashp, bp[hashp->cndx], bufp, 0); if (!bufp) return (ERROR); bp = (u_int16_t *)(bufp->page); hashp->cndx = 1; } if (!bp[0]) { hashp->cpage = NULL; ++hashp->cbucket; } } ndx = hashp->cndx; if (bp[ndx + 1] < REAL_KEY) { if (__big_keydata(hashp, bufp, key, data, 1)) return (ERROR); } else { if (hashp->cpage == 0) return (ERROR); key->data = (u_char *)hashp->cpage->page + bp[ndx]; key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx]; data->data = (u_char *)hashp->cpage->page + bp[ndx + 1]; data->size = bp[ndx] - bp[ndx + 1]; } return (SUCCESS); } /********************************* UTILITIES ************************/ /* * Returns: * 0 ==> OK * -1 ==> Error */ int __expand_table(HTAB *hashp) { u_int32_t old_bucket, new_bucket; int dirsize, new_segnum, spare_ndx; #ifdef HASH_STATISTICS hash_expansions++; #endif new_bucket = ++hashp->MAX_BUCKET; old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK); new_segnum = new_bucket >> hashp->SSHIFT; /* Check if we need a new segment */ if (new_segnum >= hashp->nsegs) { /* Check if we need to expand directory */ if (new_segnum >= hashp->DSIZE) { /* Reallocate directory */ dirsize = hashp->DSIZE * sizeof(SEGMENT *); if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1)) return (-1); hashp->DSIZE = dirsize << 1; } if ((hashp->dir[new_segnum] = calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL) return (-1); hashp->exsegs++; hashp->nsegs++; } /* * If the split point is increasing (MAX_BUCKET's log base 2 * * increases), we need to copy the current contents of the spare * split bucket to the next bucket. */ spare_ndx = __log2(hashp->MAX_BUCKET + 1); if (spare_ndx > hashp->OVFL_POINT) { hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT]; hashp->OVFL_POINT = spare_ndx; } if (new_bucket > hashp->HIGH_MASK) { /* Starting a new doubling */ hashp->LOW_MASK = hashp->HIGH_MASK; hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK; } /* Relocate records to the new bucket */ return (__split_page(hashp, old_bucket, new_bucket)); } /* * If realloc guarantees that the pointer is not destroyed if the realloc * fails, then this routine can go away. 
*/ static void * hash_realloc(SEGMENT **p_ptr, int oldsize, int newsize) { void *p; if ( (p = malloc(newsize)) ) { memmove(p, *p_ptr, oldsize); memset((char *)p + oldsize, 0, newsize - oldsize); free(*p_ptr); *p_ptr = p; } return (p); } u_int32_t __call_hash(HTAB *hashp, char *k, int len) { unsigned int n, bucket; n = hashp->hash(k, len); bucket = n & hashp->HIGH_MASK; if (bucket > hashp->MAX_BUCKET) bucket = bucket & hashp->LOW_MASK; return (bucket); } /* * Allocate segment table. On error, destroy the table and set errno. * * Returns 0 on success */ static int alloc_segs(HTAB *hashp, int nsegs) { int i; SEGMENT store; int save_errno; if ((hashp->dir = calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) { save_errno = errno; (void)hdestroy(hashp); errno = save_errno; return (-1); } hashp->nsegs = nsegs; if (nsegs == 0) return (0); /* Allocate segments */ if ((store = calloc(nsegs << hashp->SSHIFT, sizeof(SEGMENT))) == NULL) { save_errno = errno; (void)hdestroy(hashp); errno = save_errno; return (-1); } for (i = 0; i < nsegs; i++) hashp->dir[i] = &store[i << hashp->SSHIFT]; return (0); } #if BYTE_ORDER == LITTLE_ENDIAN /* * Hashp->hdr needs to be byteswapped. */ static void swap_header_copy(HASHHDR *srcp, HASHHDR *destp) { int i; P_32_COPY(srcp->magic, destp->magic); P_32_COPY(srcp->version, destp->version); P_32_COPY(srcp->lorder, destp->lorder); P_32_COPY(srcp->bsize, destp->bsize); P_32_COPY(srcp->bshift, destp->bshift); P_32_COPY(srcp->dsize, destp->dsize); P_32_COPY(srcp->ssize, destp->ssize); P_32_COPY(srcp->sshift, destp->sshift); P_32_COPY(srcp->ovfl_point, destp->ovfl_point); P_32_COPY(srcp->last_freed, destp->last_freed); P_32_COPY(srcp->max_bucket, destp->max_bucket); P_32_COPY(srcp->high_mask, destp->high_mask); P_32_COPY(srcp->low_mask, destp->low_mask); P_32_COPY(srcp->ffactor, destp->ffactor); P_32_COPY(srcp->nkeys, destp->nkeys); P_32_COPY(srcp->hdrpages, destp->hdrpages); P_32_COPY(srcp->h_charkey, destp->h_charkey); for (i = 0; i < NCACHED; i++) { P_32_COPY(srcp->spares[i], destp->spares[i]); P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]); } } static void swap_header(HTAB *hashp) { HASHHDR *hdrp; int i; hdrp = &hashp->hdr; M_32_SWAP(hdrp->magic); M_32_SWAP(hdrp->version); M_32_SWAP(hdrp->lorder); M_32_SWAP(hdrp->bsize); M_32_SWAP(hdrp->bshift); M_32_SWAP(hdrp->dsize); M_32_SWAP(hdrp->ssize); M_32_SWAP(hdrp->sshift); M_32_SWAP(hdrp->ovfl_point); M_32_SWAP(hdrp->last_freed); M_32_SWAP(hdrp->max_bucket); M_32_SWAP(hdrp->high_mask); M_32_SWAP(hdrp->low_mask); M_32_SWAP(hdrp->ffactor); M_32_SWAP(hdrp->nkeys); M_32_SWAP(hdrp->hdrpages); M_32_SWAP(hdrp->h_charkey); for (i = 0; i < NCACHED; i++) { M_32_SWAP(hdrp->spares[i]); M_16_SWAP(hdrp->bitmaps[i]); } } #endif Index: projects/release-pkg/lib/libc =================================================================== --- projects/release-pkg/lib/libc (revision 295925) +++ projects/release-pkg/lib/libc (revision 295926) Property changes on: projects/release-pkg/lib/libc ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libc:r295886-295925 Index: projects/release-pkg/sys/arm/arm/cpu_asm-v6.S =================================================================== --- projects/release-pkg/sys/arm/arm/cpu_asm-v6.S (revision 295925) +++ projects/release-pkg/sys/arm/arm/cpu_asm-v6.S (revision 295926) @@ -1,278 +1,271 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "assym.s" #include #include #include #include #include -#if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ - mrc CP15_TPIDRPRW(tmp); \ - add tmp, tmp, #(TD_PCB) -#else -.Lcurpcb: - .word _C_LABEL(__pcpu) + PC_CURPCB -#define GET_PCB(tmp) \ - ldr tmp, .Lcurpcb -#endif + mrc CP15_TPIDRPRW(tmp); \ + add tmp, tmp, #(TD_PCB) /* * Define cache functions used by startup code, which counts on the fact that * only r0-r3,r12 (ip) are modified and no stack space is used. These functions * must be called with interrupts disabled. Moreover, these work only with * caches integrated to CPU (accessible via CP15); systems with an external L2 * cache controller such as a PL310 need separate calls to that device driver * to affect L2 caches. This is not a factor during early kernel startup, as * any external L2 cache controller has not been enabled yet. */ /* Invalidate D cache to PoC. 
(aka all cache levels)*/ ASENTRY_NP(dcache_inv_poc_all) #if __ARM_ARCH == 6 mcr CP15_DCIALL DSB bx lr #else mrc CP15_CLIDR(r0) ands r0, r0, #0x07000000 mov r0, r0, lsr #23 /* Get LoC 'naturally' aligned for */ beq 4f /* use in the CSSELR register below */ 1: sub r0, #2 mcr CP15_CSSELR(r0) /* set cache level */ isb mrc CP15_CCSIDR(r0) /* read CCSIDR */ ubfx r2, r0, #13, #15 /* get num sets - 1 from CCSIDR */ ubfx r3, r0, #3, #10 /* get num ways - 1 from CCSIDR */ clz r1, r3 /* number of bits to MSB of way */ lsl r3, r3, r1 /* shift into position */ mov ip, #1 lsl ip, ip, r1 /* ip now contains the way decr */ ubfx r0, r0, #0, #3 /* get linesize from CCSIDR */ add r0, r0, #4 /* apply bias */ lsl r2, r2, r0 /* shift sets by log2(linesize) */ add r3, r3, r2 /* merge numsets - 1 with numways - 1 */ sub ip, ip, r2 /* subtract numsets - 1 from way decr */ mov r1, #1 lsl r1, r1, r0 /* r1 now contains the set decr */ mov r2, ip /* r2 now contains set way decr */ /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */ 2: mcr CP15_DCISW(r3) /* invalidate line */ movs r0, r3 /* get current way/set */ beq 3f /* at 0 means we are done */ movs r0, r0, lsl #10 /* clear way bits leaving only set bits*/ subne r3, r3, r1 /* non-zero?, decrement set */ subeq r3, r3, r2 /* zero?, decrement way and restore set count */ b 2b 3: mrc CP15_CSSELR(r0) /* get cache level */ teq r0, #0 bne 1b 4: dsb /* wait for stores to finish */ mov r0, #0 mcr CP15_CSSELR(r0) isb bx lr #endif /* __ARM_ARCH == 6 */ END(dcache_inv_poc_all) /* Invalidate D cache to PoU. (aka L1 cache only)*/ ASENTRY_NP(dcache_inv_pou_all) #if __ARM_ARCH == 6 mcr CP15_DCIALL DSB bx lr #else mrc CP15_CLIDR(r0) ands r0, r0, #0x38000000 mov r0, r0, lsr #26 /* Get LoUU (naturally aligned) */ beq 4f 1: sub r0, #2 mcr CP15_CSSELR(r0) /* set cache level */ isb mrc CP15_CCSIDR(r0) /* read CCSIDR */ ubfx r2, r0, #13, #15 /* get num sets - 1 from CCSIDR */ ubfx r3, r0, #3, #10 /* get num ways - 1 from CCSIDR */ clz r1, r3 /* number of bits to MSB of way */ lsl r3, r3, r1 /* shift into position */ mov ip, #1 lsl ip, ip, r1 /* ip now contains the way decr */ ubfx r0, r0, #0, #3 /* get linesize from CCSIDR */ add r0, r0, #4 /* apply bias */ lsl r2, r2, r0 /* shift sets by log2(linesize) */ add r3, r3, r2 /* merge numsets - 1 with numways - 1 */ sub ip, ip, r2 /* subtract numsets - 1 from way decr */ mov r1, #1 lsl r1, r1, r0 /* r1 now contains the set decr */ mov r2, ip /* r2 now contains set way decr */ /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */ 2: mcr CP15_DCISW(r3) /* invalidate line */ movs r0, r3 /* get current way/set */ beq 3f /* at 0 means we are done */ movs r0, r0, lsl #10 /* clear way bits leaving only set bits*/ subne r3, r3, r1 /* non-zero?, decrement set */ subeq r3, r3, r2 /* zero?, decrement way and restore set count */ b 2b 3: mrc CP15_CSSELR(r0) /* get cache level */ teq r0, #0 bne 1b 4: dsb /* wait for stores to finish */ mov r0, #0 mcr CP15_CSSELR(r0) bx lr #endif END(dcache_inv_pou_all) /* Write back and Invalidate D cache to PoC. 
*/ ASENTRY_NP(dcache_wbinv_poc_all) #if __ARM_ARCH == 6 mcr CP15_DCCIALL DSB bx lr #else mrc CP15_CLIDR(r0) ands r0, r0, #0x07000000 beq 4f mov r0, #0 /* Clean from inner to outer levels */ 1: mcr CP15_CSSELR(r0) /* set cache level */ isb mrc CP15_CCSIDR(r0) /* read CCSIDR */ ubfx r2, r0, #13, #15 /* get num sets - 1 from CCSIDR */ ubfx r3, r0, #3, #10 /* get num ways - 1 from CCSIDR */ clz r1, r3 /* number of bits to MSB of way */ lsl r3, r3, r1 /* shift into position */ mov ip, #1 lsl ip, ip, r1 /* ip now contains the way decr */ ubfx r0, r0, #0, #3 /* get linesize from CCSIDR */ add r0, r0, #4 /* apply bias */ lsl r2, r2, r0 /* shift sets by log2(linesize) */ add r3, r3, r2 /* merge numsets - 1 with numways - 1 */ sub ip, ip, r2 /* subtract numsets - 1 from way decr */ mov r1, #1 lsl r1, r1, r0 /* r1 now contains the set decr */ mov r2, ip /* r2 now contains set way decr */ /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */ 2: mcr CP15_DCCISW(r3) /* clean and invalidate line */ movs r0, r3 /* get current way/set */ beq 3f /* at 0 means we are done */ movs r0, r0, lsl #10 /* clear way bits leaving only set bits*/ subne r3, r3, r1 /* non-zero?, decrement set */ subeq r3, r3, r2 /* zero?, decrement way and restore set count */ b 2b 3: mrc CP15_CSSELR(r0) /* get cache level */ add r0, r0, #2 /* next level */ mrc CP15_CLIDR(r1) ands r1, r1, #0x07000000 mov r1, r1, lsr #23 /* Get LoC (naturally aligned) */ cmp r1, r0 bne 1b 4: dsb /* wait for stores to finish */ mov r0, #0 mcr CP15_CSSELR(r0) bx lr #endif /* __ARM_ARCH == 6 */ END(dcache_wbinv_poc_all) ASENTRY_NP(dcache_wb_pou_checked) ldr ip, .Lcpuinfo ldr ip, [ip, #DCACHE_LINE_SIZE] GET_PCB(r2) ldr r2, [r2] adr r3, _C_LABEL(cachebailout) str r3, [r2, #PCB_ONFAULT] 1: mcr CP15_DCCMVAC(r0) add r0, r0, ip subs r1, r1, ip bhi 1b DSB mov r0, #0 str r0, [r2, #PCB_ONFAULT] mov r0, #1 /* cannot be faulting address */ RET .Lcpuinfo: .word cpuinfo END(dcache_wb_pou_checked) ASENTRY_NP(icache_inv_pou_checked) ldr ip, .Lcpuinfo ldr ip, [ip, #ICACHE_LINE_SIZE] GET_PCB(r2) ldr r2, [r2] adr r3, _C_LABEL(cachebailout) str r3, [r2, #PCB_ONFAULT] 1: mcr CP15_ICIMVAU(r0) add r0, r0, ip subs r1, r1, ip bhi 1b DSB ISB mov r0, #0 str r0, [r2, #PCB_ONFAULT] mov r0, #1 /* cannot be faulting address */ RET END(icache_inv_pou_checked) /* label must be global as trap-v6.c references it */ .global _C_LABEL(cachebailout) _C_LABEL(cachebailout): DSB ISB mov r1, #0 str r1, [r2, #PCB_ONFAULT] RET Index: projects/release-pkg/sys/arm/arm/trap-v4.c =================================================================== --- projects/release-pkg/sys/arm/arm/trap-v4.c (revision 295925) +++ projects/release-pkg/sys/arm/arm/trap-v4.c (revision 295926) @@ -1,737 +1,732 @@ /* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ /*- * Copyright 2004 Olivier Houchard * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * RiscBSD kernel project * * fault.c * * Fault handlers * * Created : 28/11/94 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #ifdef KDB #include #endif #ifdef KDTRACE_HOOKS #include #endif #define ReadWord(a) (*((volatile unsigned int *)(a))) #ifdef DEBUG int last_fault_code; /* For the benefit of pmap_fault_fixup() */ #endif struct ksig { int signb; u_long code; }; struct data_abort { int (*func)(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); const char *desc; }; static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static int dab_align(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static void prefetch_abort_handler(struct trapframe *); static const struct data_abort data_aborts[] = { {dab_fatal, "Vector Exception"}, {dab_align, "Alignment Fault 1"}, {dab_fatal, "Terminal Exception"}, {dab_align, "Alignment Fault 3"}, {dab_buserr, "External Linefetch Abort (S)"}, {NULL, "Translation Fault (S)"}, #if (ARM_MMU_V6 + ARM_MMU_V7) != 0 {NULL, "Translation Flag Fault"}, #else {dab_buserr, "External Linefetch Abort (P)"}, #endif {NULL, "Translation Fault (P)"}, {dab_buserr, "External Non-Linefetch Abort (S)"}, {NULL, "Domain Fault (S)"}, {dab_buserr, "External Non-Linefetch Abort (P)"}, {NULL, "Domain Fault (P)"}, {dab_buserr, "External Translation Abort (L1)"}, {NULL, "Permission Fault (S)"}, {dab_buserr, "External Translation Abort (L2)"}, {NULL, "Permission Fault (P)"} }; /* Determine if a fault came from user mode */ #define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) /* Determine if 'x' is a permission fault */ #define IS_PERMISSION_FAULT(x) \ (((1 << ((x) & FAULT_TYPE_MASK)) & \ ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) static __inline void call_trapsignal(struct thread *td, int sig, u_long code) { ksiginfo_t ksi; ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = (int)code; trapsignal(td, &ksi); } void abort_handler(struct trapframe *tf, int type) { struct vm_map *map; struct pcb *pcb; struct thread *td; u_int user, far, fsr; vm_prot_t ftype; void *onfault; vm_offset_t va; int error = 0; struct ksig ksig; struct proc *p; if (type == 1) return (prefetch_abort_handler(tf)); /* Grab FAR/FSR before enabling interrupts */ far = cpu_faultaddress(); fsr = cpu_faultstatus(); #if 0 printf("data abort: fault address=%p (from pc=%p lr=%p)\n", (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr); #endif /* Update vmmeter statistics */ #if 0 vmexp.traps++; #endif td = curthread; p = td->td_proc; PCPU_INC(cnt.v_trap); /* Data abort came from user mode? 
*/ user = TRAP_USERMODE(tf); if (user) { td->td_pticks = 0; td->td_frame = tf; if (td->td_cowgen != td->td_proc->p_cowgen) thread_cow_update(td); } /* Grab the current pcb */ pcb = td->td_pcb; /* Re-enable interrupts if they were enabled previously */ if (td->td_md.md_spinlock_count == 0) { if (__predict_true(tf->tf_spsr & PSR_I) == 0) enable_interrupts(PSR_I); if (__predict_true(tf->tf_spsr & PSR_F) == 0) enable_interrupts(PSR_F); } /* Invoke the appropriate handler, if necessary */ if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, td, &ksig)) { goto do_trapsignal; } goto out; } /* * At this point, we're dealing with one of the following data aborts: * * FAULT_TRANS_S - Translation -- Section * FAULT_TRANS_P - Translation -- Page * FAULT_DOMAIN_S - Domain -- Section * FAULT_DOMAIN_P - Domain -- Page * FAULT_PERM_S - Permission -- Section * FAULT_PERM_P - Permission -- Page * * These are the main virtual memory-related faults signalled by * the MMU. */ /* * Make sure the Program Counter is sane. We could fall foul of * someone executing Thumb code, in which case the PC might not * be word-aligned. This would cause a kernel alignment fault * further down if we have to decode the current instruction. * XXX: It would be nice to be able to support Thumb at some point. */ if (__predict_false((tf->tf_pc & 3) != 0)) { if (user) { /* * Give the user an illegal instruction signal. */ /* Deliver a SIGILL to the process */ ksig.signb = SIGILL; ksig.code = 0; goto do_trapsignal; } /* * The kernel never executes Thumb code. */ printf("\ndata_abort_fault: Misaligned Kernel-mode " "Program Counter\n"); dab_fatal(tf, fsr, far, td, &ksig); } va = trunc_page((vm_offset_t)far); /* * It is only a kernel address space fault iff: * 1. user == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. */ if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS || (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && __predict_true((pcb->pcb_onfault == NULL || (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) { map = kernel_map; /* Was the fault due to the FPE/IPKDB ? */ if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { /* * Force exit via userret() * This is necessary as the FPE is an extension to * userland that actually runs in a priveledged mode * but uses USR mode permissions for its accesses. */ user = 1; ksig.signb = SIGSEGV; ksig.code = 0; goto do_trapsignal; } } else { map = &td->td_proc->p_vmspace->vm_map; } /* - * We need to know whether the page should be mapped as R or R/W. On - * armv6 and later the fault status register indicates whether the - * access was a read or write. Prior to armv6, we know that a - * permission fault can only be the result of a write to a read-only - * location, so we can deal with those quickly. Otherwise we need to - * disassemble the faulting instruction to determine if it was a write. + * We need to know whether the page should be mapped as R or R/W. + * On armv4, the fault status register does not indicate whether + * the access was a read or write. We know that a permission fault + * can only be the result of a write to a read-only location, so we + * can deal with those quickly. Otherwise we need to disassemble + * the faulting instruction to determine if it was a write. */ -#if __ARM_ARCH >= 6 - ftype = (fsr & FAULT_WNR) ? 
VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ; -#else if (IS_PERMISSION_FAULT(fsr)) ftype = VM_PROT_WRITE; else { u_int insn = ReadWord(tf->tf_pc); if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */ ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */ ((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */ ftype = VM_PROT_WRITE; } else { if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */ ftype = VM_PROT_READ | VM_PROT_WRITE; else ftype = VM_PROT_READ; } } -#endif /* * See if the fault is as a result of ref/mod emulation, * or domain mismatch. */ #ifdef DEBUG last_fault_code = fsr; #endif if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL, "Kernel page fault") != 0) goto fatal_pagefault; if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, user)) { goto out; } onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); pcb->pcb_onfault = onfault; if (__predict_true(error == 0)) goto out; fatal_pagefault: if (user == 0) { if (pcb->pcb_onfault) { tf->tf_r0 = error; tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; return; } printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype, error); dab_fatal(tf, fsr, far, td, &ksig); } if (error == ENOMEM) { printf("VM: pid %d (%s), uid %d killed: " "out of swap\n", td->td_proc->p_pid, td->td_name, (td->td_proc->p_ucred) ? td->td_proc->p_ucred->cr_uid : -1); ksig.signb = SIGKILL; } else { ksig.signb = SIGSEGV; } ksig.code = 0; do_trapsignal: call_trapsignal(td, ksig.signb, ksig.code); out: /* If returning to user mode, make sure to invoke userret() */ if (user) userret(td, tf); } /* * dab_fatal() handles the following data aborts: * * FAULT_WRTBUF_0 - Vector Exception * FAULT_WRTBUF_1 - Terminal Exception * * We should never see these on a properly functioning system. * * This function is also called by the other handlers if they * detect a fatal problem. * * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. */ static int dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { const char *mode; #ifdef KDTRACE_HOOKS if (!TRAP_USERMODE(tf)) { if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK)) return (0); } #endif mode = TRAP_USERMODE(tf) ? 
"user" : "kernel"; disable_interrupts(PSR_I|PSR_F); if (td != NULL) { printf("Fatal %s mode data abort: '%s'\n", mode, data_aborts[fsr & FAULT_TYPE_MASK].desc); printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); if ((fsr & FAULT_IMPRECISE) == 0) printf("%08x, ", far); else printf("Invalid, "); printf("spsr=%08x\n", tf->tf_spsr); } else { printf("Fatal %s mode prefetch abort at 0x%08x\n", mode, tf->tf_pc); printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); } printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); printf("r12=%08x, ", tf->tf_r12); if (TRAP_USERMODE(tf)) printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr); else printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr); printf(", pc =%08x\n\n", tf->tf_pc); #ifdef KDB if (debugger_on_panic || kdb_active) if (kdb_trap(fsr, 0, tf)) return (0); #endif panic("Fatal abort"); /*NOTREACHED*/ } /* * dab_align() handles the following data aborts: * * FAULT_ALIGN_0 - Alignment fault * FAULT_ALIGN_1 - Alignment fault * * These faults are fatal if they happen in kernel mode. Otherwise, we * deliver a bus error to the process. */ static int dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { /* Alignment faults are always fatal if they occur in kernel mode */ if (!TRAP_USERMODE(tf)) { if (!td || !td->td_pcb->pcb_onfault) dab_fatal(tf, fsr, far, td, ksig); tf->tf_r0 = EFAULT; tf->tf_pc = (int)td->td_pcb->pcb_onfault; return (0); } /* pcb_onfault *must* be NULL at this point */ /* Deliver a bus error signal to the process */ ksig->code = 0; ksig->signb = SIGBUS; td->td_frame = tf; return (1); } /* * dab_buserr() handles the following data aborts: * * FAULT_BUSERR_0 - External Abort on Linefetch -- Section * FAULT_BUSERR_1 - External Abort on Linefetch -- Page * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 * * If pcb_onfault is set, flag the fault and return to the handler. * If the fault occurred in user mode, give the process a SIGBUS. * * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 * can be flagged as imprecise in the FSR. This causes a real headache * since some of the machine state is lost. In this case, tf->tf_pc * may not actually point to the offending instruction. In fact, if * we've taken a double abort fault, it generally points somewhere near * the top of "data_abort_entry" in exception.S. * * In all other cases, these data aborts are considered fatal. */ static int dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { struct pcb *pcb = td->td_pcb; #ifdef __XSCALE__ if ((fsr & FAULT_IMPRECISE) != 0 && (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { /* * Oops, an imprecise, double abort fault. We've lost the * r14_abt/spsr_abt values corresponding to the original * abort, and the spsr saved in the trapframe indicates * ABT mode. */ tf->tf_spsr &= ~PSR_MODE; /* * We use a simple heuristic to determine if the double abort * happened as a result of a kernel or user mode access. * If the current trapframe is at the top of the kernel stack, * the fault _must_ have come from user mode. 
*/ if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) { /* * Kernel mode. We're either about to die a * spectacular death, or pcb_onfault will come * to our rescue. Either way, the current value * of tf->tf_pc is irrelevant. */ tf->tf_spsr |= PSR_SVC32_MODE; if (pcb->pcb_onfault == NULL) printf("\nKernel mode double abort!\n"); } else { /* * User mode. We've lost the program counter at the * time of the fault (not that it was accurate anyway; * it's not called an imprecise fault for nothing). * About all we can do is copy r14_usr to tf_pc and * hope for the best. The process is about to get a * SIGBUS, so it's probably history anyway. */ tf->tf_spsr |= PSR_USR32_MODE; tf->tf_pc = tf->tf_usr_lr; } } /* FAR is invalid for imprecise exceptions */ if ((fsr & FAULT_IMPRECISE) != 0) far = 0; #endif /* __XSCALE__ */ if (pcb->pcb_onfault) { tf->tf_r0 = EFAULT; tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; return (0); } /* * At this point, if the fault happened in kernel mode, we're toast */ if (!TRAP_USERMODE(tf)) dab_fatal(tf, fsr, far, td, ksig); /* Deliver a bus error signal to the process */ ksig->signb = SIGBUS; ksig->code = 0; td->td_frame = tf; return (1); } /* * void prefetch_abort_handler(struct trapframe *tf) * * Abort handler called when instruction execution occurs at * a non existent or restricted (access permissions) memory page. * If the address is invalid and we were in SVC mode then panic as * the kernel should never prefetch abort. * If the address is invalid and the page is mapped then the user process * does no have read permission so send it a signal. * Otherwise fault the page in and try again. */ static void prefetch_abort_handler(struct trapframe *tf) { struct thread *td; struct proc * p; struct vm_map *map; vm_offset_t fault_pc, va; int error = 0; struct ksig ksig; #if 0 /* Update vmmeter statistics */ uvmexp.traps++; #endif #if 0 printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc, (void*)tf->tf_usr_lr); #endif td = curthread; p = td->td_proc; PCPU_INC(cnt.v_trap); if (TRAP_USERMODE(tf)) { td->td_frame = tf; if (td->td_cowgen != td->td_proc->p_cowgen) thread_cow_update(td); } fault_pc = tf->tf_pc; if (td->td_md.md_spinlock_count == 0) { if (__predict_true(tf->tf_spsr & PSR_I) == 0) enable_interrupts(PSR_I); if (__predict_true(tf->tf_spsr & PSR_F) == 0) enable_interrupts(PSR_F); } /* Prefetch aborts cannot happen in kernel mode */ if (__predict_false(!TRAP_USERMODE(tf))) dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); td->td_pticks = 0; /* Ok validate the address, can only execute in USER space */ if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { ksig.signb = SIGSEGV; ksig.code = 0; goto do_trapsignal; } map = &td->td_proc->p_vmspace->vm_map; va = trunc_page(fault_pc); /* * See if the pmap can handle this fault on its own... */ #ifdef DEBUG last_fault_code = -1; #endif if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1)) goto out; error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE, VM_FAULT_NORMAL); if (__predict_true(error == 0)) goto out; if (error == ENOMEM) { printf("VM: pid %d (%s), uid %d killed: " "out of swap\n", td->td_proc->p_pid, td->td_name, (td->td_proc->p_ucred) ? 
td->td_proc->p_ucred->cr_uid : -1); ksig.signb = SIGKILL; } else { ksig.signb = SIGSEGV; } ksig.code = 0; do_trapsignal: call_trapsignal(td, ksig.signb, ksig.code); out: userret(td, tf); } extern int badaddr_read_1(const uint8_t *, uint8_t *); extern int badaddr_read_2(const uint16_t *, uint16_t *); extern int badaddr_read_4(const uint32_t *, uint32_t *); /* * Tentatively read an 8, 16, or 32-bit value from 'addr'. * If the read succeeds, the value is written to 'rptr' and zero is returned. * Else, return EFAULT. */ int badaddr_read(void *addr, size_t size, void *rptr) { union { uint8_t v1; uint16_t v2; uint32_t v4; } u; int rv; cpu_drain_writebuf(); /* Read from the test address. */ switch (size) { case sizeof(uint8_t): rv = badaddr_read_1(addr, &u.v1); if (rv == 0 && rptr) *(uint8_t *) rptr = u.v1; break; case sizeof(uint16_t): rv = badaddr_read_2(addr, &u.v2); if (rv == 0 && rptr) *(uint16_t *) rptr = u.v2; break; case sizeof(uint32_t): rv = badaddr_read_4(addr, &u.v4); if (rv == 0 && rptr) *(uint32_t *) rptr = u.v4; break; default: panic("badaddr: invalid size (%lu)", (u_long) size); } /* Return EFAULT if the address was invalid, else zero */ return (rv); } Index: projects/release-pkg/sys/arm/freescale/imx/imx6_machdep.c =================================================================== --- projects/release-pkg/sys/arm/freescale/imx/imx6_machdep.c (revision 295925) +++ projects/release-pkg/sys/arm/freescale/imx/imx6_machdep.c (revision 295926) @@ -1,287 +1,359 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "platform_if.h" struct fdt_fixup_entry fdt_fixup_table[] = { { NULL, NULL } }; static uint32_t gpio1_node; #ifndef ARM_INTRNG /* * Work around the linux workaround for imx6 erratum 006687, in which some * ethernet interrupts don't go to the GPC and thus won't wake the system from * Wait mode. We don't use Wait mode (which halts the GIC, leaving only GPC * interrupts able to wake the system), so we don't experience the bug at all. 
* The linux workaround is to reconfigure GPIO1_6 as the ENET interrupt by * writing magic values to an undocumented IOMUX register, then letting the gpio * interrupt driver notify the ethernet driver. We'll be able to do all that * (even though we don't need to) once the INTRNG project is committed and the * imx_gpio driver becomes an interrupt driver. Until then, this crazy little * workaround watches for requests to map an interrupt 6 with the interrupt * controller node referring to gpio1, and it substitutes the proper ffec * interrupt number. */ static int imx6_decode_fdt(uint32_t iparent, uint32_t *intr, int *interrupt, int *trig, int *pol) { if (fdt32_to_cpu(intr[0]) == 6 && OF_node_from_xref(iparent) == gpio1_node) { *interrupt = 150; *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; return (0); } return (gic_decode_fdt(iparent, intr, interrupt, trig, pol)); } fdt_pic_decode_t fdt_pic_table[] = { &imx6_decode_fdt, NULL }; #endif +/* + * Fix FDT data related to interrupts. + * + * Driven by the needs of linux and its drivers (as always), the published FDT + * data for imx6 now sets the interrupt parent for most devices to the GPC + * interrupt controller, which is for use when the chip is in deep-sleep mode. + * We don't support deep sleep or have a GPC-PIC driver; we need all interrupts + * to be handled by the GIC. + * + * Luckily, the change to the FDT data was to assign the GPC as the interrupt + * parent for the soc node and letting that get inherited by all other devices + * (except a few that directly name GIC as their interrupt parent). So we can + * set the world right by just changing the interrupt-parent property of the soc + * node to refer to GIC instead of GPC. This will get us by until we write our + * own GPC driver (or until linux changes its mind and the FDT data again). + * + * We validate that we have data that looks like we expect before changing it: + * - SOC node exists and has GPC as its interrupt parent. + * - GPC node exists and has GIC as its interrupt parent. + * - GIC node exists and is its own interrupt parent. + * + * This applies to all models of imx6. Luckily all of them have the devices + * involved at the same addresses on the same busses, so we don't need any + * per-soc logic. We handle this at platform attach time rather than via the + * fdt_fixup_table, because the latter requires matching on the FDT "model" + * property, and this applies to all boards including those not yet invented. 
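+ *
+ * Editor's illustrative sketch (not part of the original commit): the net
+ * effect on the FDT is equivalent to rewriting
+ *     soc { interrupt-parent = <&gpc>; ... };
+ * as
+ *     soc { interrupt-parent = <&gic>; ... };
+ * where &gpc and &gic stand for the phandles that the validation steps in
+ * fix_fdt_interrupt_data() below look up by node path.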
+ */ +static void +fix_fdt_interrupt_data(void) +{ + phandle_t gicipar, gicnode, gicxref; + phandle_t gpcipar, gpcnode, gpcxref; + phandle_t socipar, socnode; + int result; + + socnode = OF_finddevice("/soc"); + if (socnode == -1) + return; + result = OF_getencprop(socnode, "interrupt-parent", &socipar, + sizeof(socipar)); + if (result <= 0) + return; + + gicnode = OF_finddevice("/soc/interrupt-controller@00a01000"); + if (gicnode == -1) + return; + result = OF_getencprop(gicnode, "interrupt-parent", &gicipar, + sizeof(gicipar)); + if (result <= 0) + return; + gicxref = OF_xref_from_node(gicnode); + + gpcnode = OF_finddevice("/soc/aips-bus@02000000/gpc@020dc000"); + if (gpcnode == -1) + return; + result = OF_getencprop(gpcnode, "interrupt-parent", &gpcipar, + sizeof(gpcipar)); + if (result <= 0) + return; + gpcxref = OF_xref_from_node(gpcnode); + + if (socipar != gpcxref || gpcipar != gicxref || gicipar != gicxref) + return; + + gicxref = cpu_to_fdt32(gicxref); + OF_setprop(socnode, "interrupt-parent", &gicxref, sizeof(gicxref)); +} + static vm_offset_t imx6_lastaddr(platform_t plat) { return (arm_devmap_lastaddr()); } static int imx6_attach(platform_t plat) { + + /* Fix soc interrupt-parent property. */ + fix_fdt_interrupt_data(); + /* Inform the MPCore timer driver that its clock is variable. */ arm_tmr_change_frequency(ARM_TMR_FREQUENCY_VARIES); return (0); } static void imx6_late_init(platform_t plat) { const uint32_t IMX6_WDOG_SR_PHYS = 0x020bc004; imx_wdog_init_last_reset(IMX6_WDOG_SR_PHYS); /* Cache the gpio1 node handle for imx6_decode_fdt() workaround code. */ gpio1_node = OF_node_from_xref( OF_finddevice("/soc/aips-bus@02000000/gpio@0209c000")); } /* * Set up static device mappings. * * This attempts to cover the most-used devices with 1MB section mappings, which * is good for performance (uses fewer TLB entries for device access). * * ARMMP covers the interrupt controller, MPCore timers, global timer, and the * L2 cache controller. Most of the 1MB range is unused reserved space. * * AIPS1/AIPS2 cover most of the on-chip devices such as uart, spi, i2c, etc. * * Notably not mapped right now are HDMI, GPU, and other devices below ARMMP in * the memory map. When we get support for graphics it might make sense to * static map some of that area. Be careful with other things in that area such * as OCRAM that probably shouldn't be mapped as VM_MEMATTR_DEVICE memory. */ static int imx6_devmap_init(platform_t plat) { const uint32_t IMX6_ARMMP_PHYS = 0x00a00000; const uint32_t IMX6_ARMMP_SIZE = 0x00100000; const uint32_t IMX6_AIPS1_PHYS = 0x02000000; const uint32_t IMX6_AIPS1_SIZE = 0x00100000; const uint32_t IMX6_AIPS2_PHYS = 0x02100000; const uint32_t IMX6_AIPS2_SIZE = 0x00100000; arm_devmap_add_entry(IMX6_ARMMP_PHYS, IMX6_ARMMP_SIZE); arm_devmap_add_entry(IMX6_AIPS1_PHYS, IMX6_AIPS1_SIZE); arm_devmap_add_entry(IMX6_AIPS2_PHYS, IMX6_AIPS2_SIZE); return (0); } void cpu_reset(void) { const uint32_t IMX6_WDOG_CR_PHYS = 0x020bc000; imx_wdog_cpu_reset(IMX6_WDOG_CR_PHYS); } /* * Determine what flavor of imx6 we're running on. * * This code is based on the way u-boot does it. Information found on the web * indicates that Freescale themselves were the original source of this logic, * including the strange check for number of CPUs in the SCU configuration * register, which is apparently needed on some revisions of the SOLO. * * According to the documentation, there is such a thing as an i.MX6 Dual * (non-lite flavor). 
However, Freescale doesn't seem to have assigned it a * number or provided any logic to handle it in their detection code. * * Note that the ANALOG_DIGPROG and SCU configuration registers are not * documented in the chip reference manual. (SCU configuration is mentioned, * but not mapped out in detail.) I think the bottom two bits of the scu config * register may be ncpu-1. * * This hasn't been tested yet on a dual[-lite]. * * On a solo: * digprog = 0x00610001 * hwsoc = 0x00000062 * scu config = 0x00000500 * On a quad: * digprog = 0x00630002 * hwsoc = 0x00000063 * scu config = 0x00005503 */ u_int imx_soc_type() { uint32_t digprog, hwsoc; uint32_t *pcr; static u_int soctype; const vm_offset_t SCU_CONFIG_PHYSADDR = 0x00a00004; #define HWSOC_MX6SL 0x60 #define HWSOC_MX6DL 0x61 #define HWSOC_MX6SOLO 0x62 #define HWSOC_MX6Q 0x63 if (soctype != 0) return (soctype); digprog = imx6_anatop_read_4(IMX6_ANALOG_DIGPROG_SL); hwsoc = (digprog >> IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT) & IMX6_ANALOG_DIGPROG_SOCTYPE_MASK; if (hwsoc != HWSOC_MX6SL) { digprog = imx6_anatop_read_4(IMX6_ANALOG_DIGPROG); hwsoc = (digprog & IMX6_ANALOG_DIGPROG_SOCTYPE_MASK) >> IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT; /*printf("digprog = 0x%08x\n", digprog);*/ if (hwsoc == HWSOC_MX6DL) { pcr = arm_devmap_ptov(SCU_CONFIG_PHYSADDR, 4); if (pcr != NULL) { /*printf("scu config = 0x%08x\n", *pcr);*/ if ((*pcr & 0x03) == 0) { hwsoc = HWSOC_MX6SOLO; } } } } /* printf("hwsoc 0x%08x\n", hwsoc); */ switch (hwsoc) { case HWSOC_MX6SL: soctype = IMXSOC_6SL; break; case HWSOC_MX6SOLO: soctype = IMXSOC_6S; break; case HWSOC_MX6DL: soctype = IMXSOC_6DL; break; case HWSOC_MX6Q : soctype = IMXSOC_6Q; break; default: printf("imx_soc_type: Don't understand hwsoc 0x%02x, " "digprog 0x%08x; assuming IMXSOC_6Q\n", hwsoc, digprog); soctype = IMXSOC_6Q; break; } return (soctype); } /* * Early putc routine for EARLY_PRINTF support. To use, add to kernel config: * option SOCDEV_PA=0x02000000 * option SOCDEV_VA=0x02000000 * option EARLY_PRINTF * Resist the temptation to change the #if 0 to #ifdef EARLY_PRINTF here. It * makes sense now, but if multiple SOCs do that it will make early_putc another * duplicate symbol to be eliminated on the path to a generic kernel. */ #if 0 static void imx6_early_putc(int c) { volatile uint32_t * UART_STAT_REG = (uint32_t *)0x02020098; volatile uint32_t * UART_TX_REG = (uint32_t *)0x02020040; const uint32_t UART_TXRDY = (1 << 3); while ((*UART_STAT_REG & UART_TXRDY) == 0) continue; *UART_TX_REG = c; } early_putc_t *early_putc = imx6_early_putc; #endif static platform_method_t imx6_methods[] = { PLATFORMMETHOD(platform_attach, imx6_attach), PLATFORMMETHOD(platform_lastaddr, imx6_lastaddr), PLATFORMMETHOD(platform_devmap_init, imx6_devmap_init), PLATFORMMETHOD(platform_late_init, imx6_late_init), PLATFORMMETHOD_END, }; FDT_PLATFORM_DEF2(imx6, imx6s, "i.MX6 Solo", 0, "fsl,imx6s"); FDT_PLATFORM_DEF2(imx6, imx6d, "i.MX6 Dual", 0, "fsl,imx6d"); FDT_PLATFORM_DEF2(imx6, imx6q, "i.MX6 Quad", 0, "fsl,imx6q"); Index: projects/release-pkg/sys/arm/include/cpu-v6.h =================================================================== --- projects/release-pkg/sys/arm/include/cpu-v6.h (revision 295925) +++ projects/release-pkg/sys/arm/include/cpu-v6.h (revision 295926) @@ -1,589 +1,586 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MACHINE_CPU_V6_H #define MACHINE_CPU_V6_H /* There are no user serviceable parts here, they may change without notice */ #ifndef _KERNEL #error Only include this file in the kernel #endif #include #include #include #include #include #if __ARM_ARCH < 6 #error Only include this file for ARMv6 #else #define CPU_ASID_KERNEL 0 void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */ vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t); vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t); #ifdef DEV_PMU #include #define PMU_OVSR_C 0x80000000 /* Cycle Counter */ extern uint32_t ccnt_hi[MAXCPU]; extern int pmu_attched; #endif /* DEV_PMU */ /* * Macros to generate CP15 (system control processor) read/write functions. */ #define _FX(s...) #s #define _RF0(fname, aname...) \ static __inline register_t \ fname(void) \ { \ register_t reg; \ __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _R64F0(fname, aname) \ static __inline uint64_t \ fname(void) \ { \ uint64_t reg; \ __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _WF0(fname, aname...) \ static __inline void \ fname(void) \ { \ __asm __volatile("mcr\t" _FX(aname)); \ } #define _WF1(fname, aname...) \ static __inline void \ fname(register_t reg) \ { \ __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \ } #define _W64F1(fname, aname...) \ static __inline void \ fname(uint64_t reg) \ { \ __asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \ } /* * Raw CP15 maintenance operations * !!! not for external use !!! 
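 *
 * Editor's illustrative note (a sketch, not part of the original header):
 * with the generator macros defined above, a declaration such as
 *     _WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))
 * expands to roughly
 *     static __inline void _CP15_TLBIMVA(register_t reg)
 *     { __asm __volatile("mcr\t" "p15, 0, %0, c8, c7, 1" :: "r" (reg)); }
 * where the exact coprocessor operand string comes from the corresponding
 * CP15_* macro in the sysreg header; the operands shown here are only
 * assumed for illustration.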
*/ /* TLB */ _WF0(_CP15_TLBIALL, CP15_TLBIALL) /* Invalidate entire unified TLB */ #if __ARM_ARCH >= 7 && defined SMP _WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS) /* Invalidate entire unified TLB IS */ #endif _WF1(_CP15_TLBIASID, CP15_TLBIASID(%0)) /* Invalidate unified TLB by ASID */ #if __ARM_ARCH >= 7 && defined SMP _WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0)) /* Invalidate unified TLB by ASID IS */ #endif _WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0)) /* Invalidate unified TLB by MVA, all ASID */ #if __ARM_ARCH >= 7 && defined SMP _WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0)) /* Invalidate unified TLB by MVA, all ASID IS */ #endif _WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0)) /* Invalidate unified TLB by MVA */ _WF1(_CP15_TTB_SET, CP15_TTBR0(%0)) /* Cache and Branch predictor */ _WF0(_CP15_BPIALL, CP15_BPIALL) /* Branch predictor invalidate all */ #if __ARM_ARCH >= 7 && defined SMP _WF0(_CP15_BPIALLIS, CP15_BPIALLIS) /* Branch predictor invalidate all IS */ #endif _WF1(_CP15_BPIMVA, CP15_BPIMVA(%0)) /* Branch predictor invalidate by MVA */ _WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0)) /* Data cache clean and invalidate by MVA PoC */ _WF1(_CP15_DCCISW, CP15_DCCISW(%0)) /* Data cache clean and invalidate by set/way */ _WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0)) /* Data cache clean by MVA PoC */ #if __ARM_ARCH >= 7 _WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0)) /* Data cache clean by MVA PoU */ #endif _WF1(_CP15_DCCSW, CP15_DCCSW(%0)) /* Data cache clean by set/way */ _WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0)) /* Data cache invalidate by MVA PoC */ _WF1(_CP15_DCISW, CP15_DCISW(%0)) /* Data cache invalidate by set/way */ _WF0(_CP15_ICIALLU, CP15_ICIALLU) /* Instruction cache invalidate all PoU */ #if __ARM_ARCH >= 7 && defined SMP _WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS) /* Instruction cache invalidate all PoU IS */ #endif _WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0)) /* Instruction cache invalidate */ /* * Publicly accessible functions */ /* CP14 Debug Registers */ _RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0)) _RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0)) _RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0)) _RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0)) _RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0)) _WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0)) _WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0)) _WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0)) _WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0)) /* Various control registers */ _RF0(cp15_cpacr_get, CP15_CPACR(%0)) _WF1(cp15_cpacr_set, CP15_CPACR(%0)) _RF0(cp15_dfsr_get, CP15_DFSR(%0)) _RF0(cp15_ifsr_get, CP15_IFSR(%0)) _WF1(cp15_prrr_set, CP15_PRRR(%0)) _WF1(cp15_nmrr_set, CP15_NMRR(%0)) _RF0(cp15_ttbr_get, CP15_TTBR0(%0)) _RF0(cp15_dfar_get, CP15_DFAR(%0)) #if __ARM_ARCH >= 7 _RF0(cp15_ifar_get, CP15_IFAR(%0)) _RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0)) #endif -/* ARMv6+ and XScale */ _RF0(cp15_actlr_get, CP15_ACTLR(%0)) _WF1(cp15_actlr_set, CP15_ACTLR(%0)) -#if __ARM_ARCH >= 6 _WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0)) _WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0)) _RF0(cp15_par_get, CP15_PAR(%0)) _RF0(cp15_sctlr_get, CP15_SCTLR(%0)) -#endif /*CPU id registers */ _RF0(cp15_midr_get, CP15_MIDR(%0)) _RF0(cp15_ctr_get, CP15_CTR(%0)) _RF0(cp15_tcmtr_get, CP15_TCMTR(%0)) _RF0(cp15_tlbtr_get, CP15_TLBTR(%0)) _RF0(cp15_mpidr_get, CP15_MPIDR(%0)) _RF0(cp15_revidr_get, CP15_REVIDR(%0)) _RF0(cp15_ccsidr_get, CP15_CCSIDR(%0)) _RF0(cp15_clidr_get, CP15_CLIDR(%0)) _RF0(cp15_aidr_get, CP15_AIDR(%0)) _WF1(cp15_csselr_set, CP15_CSSELR(%0)) _RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0)) _RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0)) _RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0)) 
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0)) _RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0)) _RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0)) _RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0)) _RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0)) _RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0)) _RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0)) _RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0)) _RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0)) _RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0)) _RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0)) _RF0(cp15_cbar_get, CP15_CBAR(%0)) /* Performance Monitor registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) #elif __ARM_ARCH > 6 _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0)) _RF0(cp15_pmovsr_get, CP15_PMOVSR(%0)) _WF1(cp15_pmovsr_set, CP15_PMOVSR(%0)) _WF1(cp15_pmswinc_set, CP15_PMSWINC(%0)) _RF0(cp15_pmselr_get, CP15_PMSELR(%0)) _WF1(cp15_pmselr_set, CP15_PMSELR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) _RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0)) _WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0)) _RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0)) _WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0)) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pminten_get, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_set, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0)) #endif _RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0)) _WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0)) _RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0)) _WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0)) _RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0)) _WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0)) /* Generic Timer registers - only use when you know the hardware is available */ _RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0)) _WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0)) _RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0)) _WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0)) _RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0)) _WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0)) _RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0)) _WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0)) _RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0)) _WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0)) _RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0)) _WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0)) _RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0)) _WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0)) _RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0)) _WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0)) _RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0)) _WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0)) _R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0)) _R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0)) _R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0)) _W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0)) _R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0)) _W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0)) _R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0)) _W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0)) _R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0)) _W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0)) #undef _FX #undef _RF0 #undef _WF0 #undef _WF1 /* * TLB maintenance operations. */ /* Local (i.e. not broadcasting ) operations. */ /* Flush all TLB entries (even global). 
*/ static __inline void tlb_flush_all_local(void) { dsb(); _CP15_TLBIALL(); dsb(); } /* Flush all not global TLB entries. */ static __inline void tlb_flush_all_ng_local(void) { dsb(); _CP15_TLBIASID(CPU_ASID_KERNEL); dsb(); } /* Flush single TLB entry (even global). */ static __inline void tlb_flush_local(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Flush range of TLB entries (even global). */ static __inline void tlb_flush_range_local(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Broadcasting operations. */ #if __ARM_ARCH >= 7 && defined SMP static __inline void tlb_flush_all(void) { dsb(); _CP15_TLBIALLIS(); dsb(); } static __inline void tlb_flush_all_ng(void) { dsb(); _CP15_TLBIASIDIS(CPU_ASID_KERNEL); dsb(); } static __inline void tlb_flush(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVAAIS(va); dsb(); } static __inline void tlb_flush_range(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVAAIS(va); dsb(); } #else /* SMP */ #define tlb_flush_all() tlb_flush_all_local() #define tlb_flush_all_ng() tlb_flush_all_ng_local() #define tlb_flush(va) tlb_flush_local(va) #define tlb_flush_range(va, size) tlb_flush_range_local(va, size) #endif /* SMP */ /* * Cache maintenance operations. */ /* Sync I and D caches to PoU */ static __inline void icache_sync(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); #if __ARM_ARCH >= 7 && defined SMP _CP15_ICIALLUIS(); #else _CP15_ICIALLU(); #endif dsb(); isb(); } /* Invalidate I cache */ static __inline void icache_inv_all(void) { #if __ARM_ARCH >= 7 && defined SMP _CP15_ICIALLUIS(); #else _CP15_ICIALLU(); #endif dsb(); isb(); } /* Invalidate branch predictor buffer */ static __inline void bpb_inv_all(void) { #if __ARM_ARCH >= 7 && defined SMP _CP15_BPIALLIS(); #else _CP15_BPIALL(); #endif dsb(); isb(); } /* Write back D-cache to PoU */ static __inline void dcache_wb_pou(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); } /* * Invalidate D-cache to PoC * * Caches are invalidated from outermost to innermost as fresh cachelines * flow in this direction. In given range, if there was no dirty cacheline * in any cache before, no stale cacheline should remain in them after this * operation finishes. 
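 *
 * Editor's illustrative note (a sketch, not part of the original header): a
 * typical caller invalidates a DMA receive buffer before the CPU reads the
 * freshly written data, e.g.
 *     dcache_inv_poc((vm_offset_t)buf, vtophys(buf), len);
 * where vtophys() is only an assumed way of obtaining the buffer's physical
 * address for the L2 range operation.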
*/ static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); /* invalidate L2 first */ cpu_l2cache_inv_range(pa, size); /* then L1 */ va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* * Discard D-cache lines to PoC, prior to overwrite by DMA engine. * * Normal invalidation does L2 then L1 to ensure that stale data from L2 doesn't * flow into L1 while invalidating. This routine is intended to be used only * when invalidating a buffer before a DMA operation loads new data into memory. * The concern in this case is that dirty lines are not evicted to main memory, * overwriting the DMA data. For that reason, the L1 is done first to ensure * that an evicted L1 line doesn't flow to L2 after the L2 has been cleaned. */ static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; /* invalidate L1 first */ dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); /* then L2 */ cpu_l2cache_inv_range(pa, size); } /* * Write back D-cache to PoC * * Caches are written back from innermost to outermost as dirty cachelines * flow in this direction. In given range, no dirty cacheline should remain * in any cache after this operation finishes. */ static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); cpu_l2cache_wb_range(pa, size); } /* Write back and invalidate D-cache to PoC */ static __inline void dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) { vm_offset_t va; vm_offset_t eva = sva + size; dsb(); /* write back L1 first */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); /* then write back and invalidate L2 */ cpu_l2cache_wbinv_range(pa, size); /* then invalidate L1 */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* Set TTB0 register */ static __inline void cp15_ttbr_set(uint32_t reg) { dsb(); _CP15_TTB_SET(reg); dsb(); _CP15_BPIALL(); dsb(); isb(); tlb_flush_all_ng_local(); } #endif /* _KERNEL */ #endif /* !MACHINE_CPU_V6_H */ Index: projects/release-pkg/sys/arm64/conf/GENERIC =================================================================== --- projects/release-pkg/sys/arm64/conf/GENERIC (revision 295925) +++ projects/release-pkg/sys/arm64/conf/GENERIC (revision 295926) @@ -1,157 +1,158 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/arm64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. 
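#
# Editor's illustrative note (not part of the original file): a kernel is
# typically built from this configuration with the standard procedure from
# the handbook chapter referenced above, e.g.
#   make buildkernel TARGET=arm64 KERNCONF=GENERIC
# run from the top of the source tree; the exact invocation is only assumed
# here as an example.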
# # $FreeBSD$ cpu ARM64 ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC # IP (v4/v6) security options TCP_OFFLOAD # TCP offload options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_PART_GPT # GUID Partition Tables. options GEOM_RAID # Soft RAID functionality. options GEOM_LABEL # Provides labelization options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options VFP # Floating-point support options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits options SMP # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB # Support DDB. #options GDB # Support remote GDB. 
options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones # SoC support options SOC_CAVM_THUNDERX options SOC_HISI_HI6220 # VirtIO support device virtio device virtio_mmio device virtio_blk device vtnet # Bus drivers device pci options PCI_IOV # PCI SR-IOV support # Ethernet NICs device mii device miibus # MII bus support device em # Intel PRO/1000 Gigabit Ethernet Family device igb # Intel PRO/1000 PCIE Server Gigabit Family +device ix # Intel 10Gb Ethernet Family device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device vnic # Cavium ThunderX NIC # Block devices device ahci device scbus device da # ATA/SCSI peripherals device pass # Passthrough device (direct ATA/SCSI access) # MMC/SD/SDIO Card slot support device mmc # mmc/sd bus device mmcsd # mmc/sd flash cards device dwmmc # Serial (COM) ports device uart # Generic UART driver device pl011 # USB support options USB_DEBUG # enable debug msgs device dwcotg # DWC OTG controller device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module device psci # Support for ARM PSCI # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # Chip-specific errata options THUNDERX_PASS_1_1_ERRATA options FDT device acpi # The crypto framework is required by IPSEC device crypto # Required by IPSEC Index: projects/release-pkg/sys/boot/fdt/dts/riscv/qemu.dts =================================================================== --- projects/release-pkg/sys/boot/fdt/dts/riscv/qemu.dts (revision 295925) +++ projects/release-pkg/sys/boot/fdt/dts/riscv/qemu.dts (revision 295926) @@ -1,92 +1,92 @@ /*- * Copyright (c) 2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /dts-v1/; / { model = "QEMU RV64I"; compatible = "riscv,rv64i"; #address-cells = <1>; #size-cells = <1>; #interrupt-cells = <1>; aliases { console0 = &console0; }; memory { device_type = "memory"; - reg = <0x0 0x8000000>; /* 128MB at 0x0 */ + reg = <0x0 0x40000000>; /* 1GB at 0x0 */ }; soc { #address-cells = <2>; #size-cells = <2>; #interrupt-cells = <1>; compatible = "simple-bus"; ranges; pic0: pic@0 { compatible = "riscv,pic"; interrupt-controller; }; timer0: timer@0 { compatible = "riscv,timer"; interrupts = < 1 >; interrupt-parent = < &pic0 >; clock-frequency = < 400000000 >; }; htif0: htif@0 { compatible = "riscv,htif"; interrupts = < 0 >; interrupt-parent = < &pic0 >; console0: console@0 { compatible = "htif,console"; status = "okay"; }; }; }; chosen { bootargs = "-v"; stdin = "console0"; stdout = "console0"; }; }; Index: projects/release-pkg/sys/boot/fdt/dts/riscv/spike.dts =================================================================== --- projects/release-pkg/sys/boot/fdt/dts/riscv/spike.dts (revision 295925) +++ projects/release-pkg/sys/boot/fdt/dts/riscv/spike.dts (revision 295926) @@ -1,92 +1,92 @@ /*- * Copyright (c) 2015 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /dts-v1/; / { model = "UC Berkeley Spike Simulator RV64I"; compatible = "riscv,rv64i"; #address-cells = <1>; #size-cells = <1>; #interrupt-cells = <1>; aliases { console0 = &console0; }; memory { device_type = "memory"; - reg = <0x0 0x8000000>; /* 128MB at 0x0 */ + reg = <0x0 0x40000000>; /* 1GB at 0x0 */ }; soc { #address-cells = <2>; #size-cells = <2>; #interrupt-cells = <1>; compatible = "simple-bus"; ranges; pic0: pic@0 { compatible = "riscv,pic"; interrupt-controller; }; timer0: timer@0 { compatible = "riscv,timer"; interrupts = < 1 >; interrupt-parent = < &pic0 >; clock-frequency = < 1000000 >; }; htif0: htif@0 { compatible = "riscv,htif"; interrupts = < 0 >; interrupt-parent = < &pic0 >; console0: console@0 { compatible = "htif,console"; status = "okay"; }; }; }; chosen { bootargs = "-v"; stdin = "console0"; stdout = "console0"; }; }; Index: projects/release-pkg/sys/boot/uboot/lib/net.c =================================================================== --- projects/release-pkg/sys/boot/uboot/lib/net.c (revision 295925) +++ projects/release-pkg/sys/boot/uboot/lib/net.c (revision 295926) @@ -1,355 +1,362 @@ /*- * Copyright (c) 2000-2001 Benno Rice * Copyright (c) 2007 Semihalf, Rafal Jaworowski * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "api_public.h" #include "glue.h" #include "libuboot.h" #include "dev_net.h" static int net_probe(struct netif *, void *); static int net_match(struct netif *, void *); static void net_init(struct iodesc *, void *); static int net_get(struct iodesc *, void *, size_t, time_t); static int net_put(struct iodesc *, void *, size_t); static void net_end(struct netif *); extern struct netif_stats net_stats[]; struct netif_dif net_ifs[] = { /* dif_unit dif_nsel dif_stats dif_private */ { 0, 1, &net_stats[0], 0, }, }; struct netif_stats net_stats[NENTS(net_ifs)]; struct netif_driver uboot_net = { "uboot_eth", /* netif_bname */ net_match, /* netif_match */ net_probe, /* netif_probe */ net_init, /* netif_init */ net_get, /* netif_get */ net_put, /* netif_put */ net_end, /* netif_end */ net_ifs, /* netif_ifs */ NENTS(net_ifs) /* netif_nifs */ }; struct uboot_softc { uint32_t sc_pad; uint8_t sc_rxbuf[ETHER_MAX_LEN]; uint8_t sc_txbuf[ETHER_MAX_LEN + PKTALIGN]; uint8_t *sc_txbufp; int sc_handle; /* device handle for ub_dev_xxx */ }; static struct uboot_softc uboot_softc; /* * get_env_net_params() * * Attempt to obtain all the parms we need for netbooting from the U-Boot * environment. If we fail to obtain the values it may still be possible to * netboot; the net_dev code will attempt to get the values from bootp, rarp, * and other such sources. * * If rootip.s_addr is non-zero net_dev assumes the required global variables * are set and skips the bootp inquiry. For that reason, we don't set rootip * until we've verified that we have at least the minimum required info. * * This is called from netif_init() which can result in it getting called * multiple times, by design. The network code at higher layers zeroes out * rootip when it closes a network interface, so if it gets opened again we have * to obtain all this info again. */ static void get_env_net_params() { char *envstr; in_addr_t rootaddr, serveraddr; - /* Silently get out right away if we don't have rootpath. */ - if (ub_env_get("rootpath") == NULL) + /* + * Silently get out right away if we don't have rootpath, because none + * of the other info we obtain below is sufficient to boot without it. + * + * If we do have rootpath, copy it into the global var and also set + * dhcp.root-path in the env. If we don't get all the other info from + * the u-boot env below, we will still try dhcp/bootp, but the server- + * provided path will not replace the user-provided value we set here. + */ + if ((envstr = ub_env_get("rootpath")) == NULL) return; + strlcpy(rootpath, envstr, sizeof(rootpath)); + setenv("dhcp.root-path", rootpath, 0); /* * Our own IP address must be valid. Silently get out if it's not set, * but whine if it's there and we can't parse it. */ if ((envstr = ub_env_get("ipaddr")) == NULL) return; if ((myip.s_addr = inet_addr(envstr)) == INADDR_NONE) { printf("Could not parse ipaddr '%s'\n", envstr); return; } /* * Netmask is optional, default to the "natural" netmask for our IP, but * whine if it was provided and we couldn't parse it. 
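One detail of the rootpath handling above is worth spelling out: the third argument to setenv() is an overwrite flag, and passing 0 means an already-set variable keeps its value, which is how the user-provided path can survive a later server-provided one. The standalone demo below shows that behaviour with the POSIX setenv()/getenv(); it assumes the loader environment follows the same overwrite semantics, and the addresses and paths are made up for the demo.

/* Standalone illustration of the setenv() overwrite-flag semantics. */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	/* The first setting wins... */
	setenv("dhcp.root-path", "10.0.0.1:/export/user-root", 0);
	/* ...a later call with overwrite == 0 leaves it untouched. */
	setenv("dhcp.root-path", "10.0.0.2:/export/server-root", 0);
	printf("%s\n", getenv("dhcp.root-path"));	/* prints the user-root path */
	return (0);
}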
*/ if ((envstr = ub_env_get("netmask")) != NULL && (netmask = inet_addr(envstr)) == INADDR_NONE) { printf("Could not parse netmask '%s'\n", envstr); } if (netmask == INADDR_NONE) { if (IN_CLASSA(myip.s_addr)) netmask = IN_CLASSA_NET; else if (IN_CLASSB(myip.s_addr)) netmask = IN_CLASSB_NET; else netmask = IN_CLASSC_NET; } /* * Get optional serverip before rootpath; the latter can override it. * Whine only if it's present but can't be parsed. */ serveraddr = INADDR_NONE; if ((envstr = ub_env_get("serverip")) != NULL) { if ((serveraddr = inet_addr(envstr)) == INADDR_NONE) printf("Could not parse serverip '%s'\n", envstr); } /* * There must be a rootpath. It may be ip:/path or it may be just the * path in which case the ip needs to be in serverip. */ - if ((envstr = ub_env_get("rootpath")) == NULL) - return; - strncpy(rootpath, envstr, sizeof(rootpath) - 1); rootaddr = net_parse_rootpath(); if (rootaddr == INADDR_NONE) rootaddr = serveraddr; if (rootaddr == INADDR_NONE) { printf("No server address for rootpath '%s'\n", envstr); return; } rootip.s_addr = rootaddr; /* * Gateway IP is optional unless rootip is on a different net in which * case whine if it's missing or we can't parse it, and set rootip addr * to zero, which signals to other network code that network params * aren't set (so it will try dhcp, bootp, etc). */ envstr = ub_env_get("gatewayip"); if (!SAMENET(myip, rootip, netmask)) { if (envstr == NULL) { printf("Need gatewayip for a root server on a " "different network.\n"); rootip.s_addr = 0; return; } if ((gateip.s_addr = inet_addr(envstr) == INADDR_NONE)) { printf("Could not parse gatewayip '%s'\n", envstr); rootip.s_addr = 0; return; } } } static int net_match(struct netif *nif, void *machdep_hint) { char **a = (char **)machdep_hint; if (memcmp("net", *a, 3) == 0) return (1); printf("net_match: could not match network device\n"); return (0); } static int net_probe(struct netif *nif, void *machdep_hint) { struct device_info *di; int i; for (i = 0; i < devs_no; i++) if ((di = ub_dev_get(i)) != NULL) if (di->type == DEV_TYP_NET) break; if (i == devs_no) { printf("net_probe: no network devices found, maybe not" " enumerated yet..?\n"); return (-1); } #if defined(NETIF_DEBUG) printf("net_probe: network device found: %d\n", i); #endif uboot_softc.sc_handle = i; return (0); } static int net_put(struct iodesc *desc, void *pkt, size_t len) { struct netif *nif = desc->io_netif; struct uboot_softc *sc = nif->nif_devdata; size_t sendlen; ssize_t rv; #if defined(NETIF_DEBUG) struct ether_header *eh; printf("net_put: desc %p, pkt %p, len %d\n", desc, pkt, len); eh = pkt; printf("dst: %s ", ether_sprintf(eh->ether_dhost)); printf("src: %s ", ether_sprintf(eh->ether_shost)); printf("type: 0x%x\n", eh->ether_type & 0xffff); #endif if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) { sendlen = ETHER_MIN_LEN - ETHER_CRC_LEN; bzero(sc->sc_txbufp, sendlen); } else sendlen = len; memcpy(sc->sc_txbufp, pkt, len); rv = ub_dev_send(sc->sc_handle, sc->sc_txbufp, sendlen); #if defined(NETIF_DEBUG) printf("net_put: ub_send returned %d\n", rv); #endif if (rv == 0) rv = len; else rv = -1; return (rv); } static int net_get(struct iodesc *desc, void *pkt, size_t len, time_t timeout) { struct netif *nif = desc->io_netif; struct uboot_softc *sc = nif->nif_devdata; time_t t; int err, rlen; #if defined(NETIF_DEBUG) printf("net_get: pkt %p, len %d, timeout %d\n", pkt, len, timeout); #endif t = getsecs(); do { err = ub_dev_recv(sc->sc_handle, sc->sc_rxbuf, len, &rlen); if (err != 0) { printf("net_get: ub_dev_recv() failed, 
error=%d\n", err); rlen = 0; break; } } while ((rlen == -1 || rlen == 0) && (getsecs() - t < timeout)); #if defined(NETIF_DEBUG) printf("net_get: received len %d (%x)\n", rlen, rlen); #endif if (rlen > 0) { memcpy(pkt, sc->sc_rxbuf, MIN(len, rlen)); if (rlen != len) { #if defined(NETIF_DEBUG) printf("net_get: len %x, rlen %x\n", len, rlen); #endif } return (rlen); } return (-1); } static void net_init(struct iodesc *desc, void *machdep_hint) { struct netif *nif = desc->io_netif; struct uboot_softc *sc; struct device_info *di; int err; sc = nif->nif_devdata = &uboot_softc; if ((err = ub_dev_open(sc->sc_handle)) != 0) panic("%s%d: initialisation failed with error %d\n", nif->nif_driver->netif_bname, nif->nif_unit, err); /* Get MAC address */ di = ub_dev_get(sc->sc_handle); memcpy(desc->myea, di->di_net.hwaddr, 6); if (memcmp (desc->myea, "\0\0\0\0\0\0", 6) == 0) { panic("%s%d: empty ethernet address!", nif->nif_driver->netif_bname, nif->nif_unit); } /* Attempt to get netboot params from the u-boot env. */ get_env_net_params(); if (myip.s_addr != 0) desc->myip = myip; #if defined(NETIF_DEBUG) printf("network: %s%d attached to %s\n", nif->nif_driver->netif_bname, nif->nif_unit, ether_sprintf(desc->myea)); #endif /* Set correct alignment for TX packets */ sc->sc_txbufp = sc->sc_txbuf; if ((unsigned long)sc->sc_txbufp % PKTALIGN) sc->sc_txbufp += PKTALIGN - (unsigned long)sc->sc_txbufp % PKTALIGN; } static void net_end(struct netif *nif) { struct uboot_softc *sc = nif->nif_devdata; int err; if ((err = ub_dev_close(sc->sc_handle)) != 0) panic("%s%d: net_end failed with error %d\n", nif->nif_driver->netif_bname, nif->nif_unit, err); } Index: projects/release-pkg/sys/boot =================================================================== --- projects/release-pkg/sys/boot (revision 295925) +++ projects/release-pkg/sys/boot (revision 295926) Property changes on: projects/release-pkg/sys/boot ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot:r295886-295925 Index: projects/release-pkg/sys/compat/cloudabi/cloudabi_mem.c =================================================================== --- projects/release-pkg/sys/compat/cloudabi/cloudabi_mem.c (revision 295925) +++ projects/release-pkg/sys/compat/cloudabi/cloudabi_mem.c (revision 295926) @@ -1,179 +1,197 @@ /*- * Copyright (c) 2015 Nuxi, https://nuxi.nl/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include /* Converts CloudABI's memory protection flags to FreeBSD's. */ static int -convert_mprot(cloudabi_mprot_t in) +convert_mprot(cloudabi_mprot_t in, int *out) { - int out; - out = 0; + /* Unknown protection flags. */ + if ((in & ~(CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE | + CLOUDABI_PROT_READ)) != 0) + return (ENOTSUP); + /* W^X: Write and exec cannot be enabled at the same time. */ + if ((in & (CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE)) == + (CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE)) + return (ENOTSUP); + + *out = 0; if (in & CLOUDABI_PROT_EXEC) - out |= PROT_EXEC; + *out |= PROT_EXEC; if (in & CLOUDABI_PROT_WRITE) - out |= PROT_WRITE; + *out |= PROT_WRITE; if (in & CLOUDABI_PROT_READ) - out |= PROT_READ; - return (out); + *out |= PROT_READ; + return (0); } int cloudabi_sys_mem_advise(struct thread *td, struct cloudabi_sys_mem_advise_args *uap) { struct madvise_args madvise_args = { .addr = uap->addr, .len = uap->len }; switch (uap->advice) { case CLOUDABI_ADVICE_DONTNEED: madvise_args.behav = MADV_DONTNEED; break; case CLOUDABI_ADVICE_NORMAL: madvise_args.behav = MADV_NORMAL; break; case CLOUDABI_ADVICE_RANDOM: madvise_args.behav = MADV_RANDOM; break; case CLOUDABI_ADVICE_SEQUENTIAL: madvise_args.behav = MADV_SEQUENTIAL; break; case CLOUDABI_ADVICE_WILLNEED: madvise_args.behav = MADV_WILLNEED; break; default: return (EINVAL); } return (sys_madvise(td, &madvise_args)); } int cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap) { struct mlock_args mlock_args = { .addr = uap->addr, .len = uap->len }; return (sys_mlock(td, &mlock_args)); } int cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap) { struct mmap_args mmap_args = { .addr = uap->addr, .len = uap->len, - .prot = convert_mprot(uap->prot), .fd = uap->fd, .pos = uap->off }; + int error; /* Translate flags. */ if (uap->flags & CLOUDABI_MAP_ANON) mmap_args.flags |= MAP_ANON; if (uap->flags & CLOUDABI_MAP_FIXED) mmap_args.flags |= MAP_FIXED; if (uap->flags & CLOUDABI_MAP_PRIVATE) mmap_args.flags |= MAP_PRIVATE; if (uap->flags & CLOUDABI_MAP_SHARED) mmap_args.flags |= MAP_SHARED; + /* Translate protection. */ + error = convert_mprot(uap->prot, &mmap_args.prot); + if (error != 0) + return (error); + return (sys_mmap(td, &mmap_args)); } int cloudabi_sys_mem_protect(struct thread *td, struct cloudabi_sys_mem_protect_args *uap) { struct mprotect_args mprotect_args = { .addr = uap->addr, .len = uap->len, - .prot = convert_mprot(uap->prot), }; + int error; + + /* Translate protection. */ + error = convert_mprot(uap->prot, &mprotect_args.prot); + if (error != 0) + return (error); return (sys_mprotect(td, &mprotect_args)); } int cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap) { struct msync_args msync_args = { .addr = uap->addr, .len = uap->len, }; /* Convert flags. 
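To make the new error-returning contract of convert_mprot() concrete, the sketch below lists the expected outcomes for a few inputs. It is an illustration written for this note, not code from the file, and relies only on constants already used above.

/*
 * Illustrative expectations for the reworked convert_mprot(); not part of
 * this file.
 */
static void
convert_mprot_examples(void)
{
	int error, prot;

	error = convert_mprot(CLOUDABI_PROT_READ | CLOUDABI_PROT_EXEC, &prot);
	/* error == 0, prot == (PROT_READ | PROT_EXEC) */

	error = convert_mprot(CLOUDABI_PROT_WRITE | CLOUDABI_PROT_EXEC, &prot);
	/* error == ENOTSUP: write and exec may not be combined (W^X) */

	error = convert_mprot(~(CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE |
	    CLOUDABI_PROT_READ), &prot);
	/* error == ENOTSUP: bits outside the three known flags are rejected */

	(void)error;
	(void)prot;
}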
*/ switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) { case CLOUDABI_MS_ASYNC: msync_args.flags |= MS_ASYNC; break; case CLOUDABI_MS_SYNC: msync_args.flags |= MS_SYNC; break; default: return (EINVAL); } if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0) msync_args.flags |= MS_INVALIDATE; return (sys_msync(td, &msync_args)); } int cloudabi_sys_mem_unlock(struct thread *td, struct cloudabi_sys_mem_unlock_args *uap) { struct munlock_args munlock_args = { .addr = uap->addr, .len = uap->len }; return (sys_munlock(td, &munlock_args)); } int cloudabi_sys_mem_unmap(struct thread *td, struct cloudabi_sys_mem_unmap_args *uap) { struct munmap_args munmap_args = { .addr = uap->addr, .len = uap->len }; return (sys_munmap(td, &munmap_args)); } Index: projects/release-pkg/sys/conf/files.riscv =================================================================== --- projects/release-pkg/sys/conf/files.riscv (revision 295925) +++ projects/release-pkg/sys/conf/files.riscv (revision 295926) @@ -1,44 +1,45 @@ # $FreeBSD$ crypto/blowfish/bf_enc.c optional crypto | ipsec crypto/des/des_enc.c optional crypto | ipsec | netsmb kern/kern_clocksource.c standard kern/subr_dummy_vdso_tc.c standard libkern/bcmp.c standard libkern/ffs.c standard libkern/ffsl.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/memmove.c standard libkern/memset.c standard riscv/htif/htif.c standard riscv/htif/htif_block.c standard riscv/htif/htif_console.c standard riscv/riscv/autoconf.c standard riscv/riscv/bcopy.c standard riscv/riscv/bus_machdep.c standard riscv/riscv/busdma_machdep.c standard riscv/riscv/clock.c standard riscv/riscv/copyinout.S standard riscv/riscv/copystr.c standard riscv/riscv/cpufunc_asm.S standard riscv/riscv/devmap.c standard riscv/riscv/dump_machdep.c standard riscv/riscv/elf_machdep.c standard riscv/riscv/intr_machdep.c standard riscv/riscv/in_cksum.c optional inet | inet6 riscv/riscv/identcpu.c standard riscv/riscv/locore.S standard no-obj riscv/riscv/minidump_machdep.c standard riscv/riscv/machdep.c standard riscv/riscv/mem.c standard riscv/riscv/nexus.c standard riscv/riscv/pmap.c standard -riscv/riscv/sys_machdep.c standard +riscv/riscv/stack_machdep.c optional ddb | stack riscv/riscv/support.S standard riscv/riscv/swtch.S standard +riscv/riscv/sys_machdep.c standard riscv/riscv/trap.c standard riscv/riscv/timer.c standard riscv/riscv/uio_machdep.c standard riscv/riscv/uma_machdep.c standard riscv/riscv/vm_machdep.c standard Index: projects/release-pkg/sys/conf/kern.post.mk =================================================================== --- projects/release-pkg/sys/conf/kern.post.mk (revision 295925) +++ projects/release-pkg/sys/conf/kern.post.mk (revision 295926) @@ -1,400 +1,399 @@ # $FreeBSD$ # Part of a unified Makefile for building kernels. This part includes all # the definitions that need to be after all the % directives except %RULES # and ones that act like they are part of %RULES. # # Most make variables should not be defined in this file. Instead, they # should be defined in the kern.pre.mk so that port makefiles can # override or augment them. # In case the config had a makeoptions DESTDIR... 
.if defined(DESTDIR) MKMODULESENV+= DESTDIR="${DESTDIR}" .endif SYSDIR?= ${S:C;^[^/];${.CURDIR}/&;} MKMODULESENV+= KERNBUILDDIR="${.CURDIR}" SYSDIR="${SYSDIR}" .if defined(CONF_CFLAGS) MKMODULESENV+= CONF_CFLAGS="${CONF_CFLAGS}" .endif .if defined(WITH_CTF) MKMODULESENV+= WITH_CTF="${WITH_CTF}" .endif # Allow overriding the kernel debug directory, so kernel and user debug may be # installed in different directories. Setting it to "" restores the historical # behavior of installing debug files in the kernel directory. KERN_DEBUGDIR?= ${DEBUGDIR} .MAIN: all .for target in all clean cleandepend cleandir clobber depend install \ obj reinstall tags ${target}: kernel-${target} .if !defined(MODULES_WITH_WORLD) && !defined(NO_MODULES) && exists($S/modules) ${target}: modules-${target} modules-${target}: cd $S/modules; ${MKMODULESENV} ${MAKE} \ ${target:S/^reinstall$/install/:S/^clobber$/cleandir/} .endif .endfor # Handle ports (as defined by the user) that build kernel modules .if !defined(NO_MODULES) && defined(PORTS_MODULES) # # The ports tree needs some environment variables defined to match the new kernel # # Ports search for some dependencies in PATH, so add the location of the installed files LOCALBASE?= /usr/local # SRC_BASE is how the ports tree refers to the location of the base source files .if !defined(SRC_BASE) SRC_BASE= ${SYSDIR:H:tA} .endif # OSVERSION is used by some ports to determine build options .if !defined(OSRELDATE) # Definition copied from src/Makefile.inc1 OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ ${MAKEOBJDIRPREFIX}${SRC_BASE}/include/osreldate.h .endif # Keep the related ports builds in the obj directory so that they are only rebuilt once per kernel build WRKDIRPREFIX?= ${MAKEOBJDIRPREFIX}${SRC_BASE}/sys/${KERNCONF} PORTSMODULESENV=\ PATH=${PATH}:${LOCALBASE}/bin:${LOCALBASE}/sbin \ SRC_BASE=${SRC_BASE} \ OSVERSION=${OSRELDATE} \ WRKDIRPREFIX=${WRKDIRPREFIX} # The WRKDIR needs to be cleaned before building, and trying to change the target # with a :C pattern below results in install -> instclean all: .for __i in ${PORTS_MODULES} @${ECHO} "===> Ports module ${__i} (all)" cd $${PORTSDIR:-/usr/ports}/${__i}; ${PORTSMODULESENV} ${MAKE} -B clean all .endfor .for __target in install reinstall clean ${__target}: ports-${__target} ports-${__target}: .for __i in ${PORTS_MODULES} @${ECHO} "===> Ports module ${__i} (${__target})" cd $${PORTSDIR:-/usr/ports}/${__i}; ${PORTSMODULESENV} ${MAKE} -B ${__target:C/install/deinstall reinstall/:C/reinstall/deinstall reinstall/} .endfor .endfor .endif .ORDER: kernel-install modules-install kernel-all: ${KERNEL_KO} ${KERNEL_EXTRA} kernel-cleandir: kernel-clean kernel-cleandepend kernel-clobber: find . -maxdepth 1 ! -type d ! -name version -delete kernel-obj: .if !defined(MODULES_WITH_WORLD) && !defined(NO_MODULES) && exists($S/modules) modules: modules-all .if !defined(NO_MODULES_OBJ) modules-all modules-depend: modules-obj .endif .endif .if !defined(DEBUG) FULLKERNEL= ${KERNEL_KO} .else FULLKERNEL= ${KERNEL_KO}.full ${KERNEL_KO}: ${FULLKERNEL} ${KERNEL_KO}.debug ${OBJCOPY} --strip-debug --add-gnu-debuglink=${KERNEL_KO}.debug \ ${FULLKERNEL} ${.TARGET} ${KERNEL_KO}.debug: ${FULLKERNEL} ${OBJCOPY} --only-keep-debug ${FULLKERNEL} ${.TARGET} install.debug reinstall.debug: gdbinit cd ${.CURDIR}; ${MAKE} ${.TARGET:R} # Install gdbinit files for kernel debugging. 
gdbinit: grep -v '# XXX' ${S}/../tools/debugscripts/dot.gdbinit | \ sed "s:MODPATH:${.OBJDIR}/modules:" > .gdbinit cp ${S}/../tools/debugscripts/gdbinit.kernel ${.CURDIR} .if exists(${S}/../tools/debugscripts/gdbinit.${MACHINE_CPUARCH}) cp ${S}/../tools/debugscripts/gdbinit.${MACHINE_CPUARCH} \ ${.CURDIR}/gdbinit.machine .endif .endif ${FULLKERNEL}: ${SYSTEM_DEP} vers.o @rm -f ${.TARGET} @echo linking ${.TARGET} ${SYSTEM_LD} .if !empty(MD_ROOT_SIZE_CONFIGURED) && defined(MFS_IMAGE) @sh ${S}/tools/embed_mfs.sh ${.TARGET} ${MFS_IMAGE} .endif .if ${MK_CTF} != "no" @echo ${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ... @${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ${SYSTEM_OBJS} vers.o .endif .if !defined(DEBUG) ${OBJCOPY} --strip-debug ${.TARGET} .endif ${SYSTEM_LD_TAIL} .if !exists(${.OBJDIR}/.depend) ${SYSTEM_OBJS}: assym.s vnode_if.h ${BEFORE_DEPEND:M*.h} ${MFILES:T:S/.m$/.h/} .endif LNFILES= ${CFILES:T:S/.c$/.ln/} .for mfile in ${MFILES} # XXX the low quality .m.o rules gnerated by config are normally used # instead of the .m.c rules here. ${mfile:T:S/.m$/.c/}: ${mfile} ${AWK} -f $S/tools/makeobjops.awk ${mfile} -c ${mfile:T:S/.m$/.h/}: ${mfile} ${AWK} -f $S/tools/makeobjops.awk ${mfile} -h .endfor kernel-clean: rm -f *.o *.so *.So *.ko *.s eddep errs \ ${FULLKERNEL} ${KERNEL_KO} ${KERNEL_KO}.debug \ linterrs tags vers.c \ vnode_if.c vnode_if.h vnode_if_newproto.h vnode_if_typedef.h \ ${MFILES:T:S/.m$/.c/} ${MFILES:T:S/.m$/.h/} \ ${CLEAN} lint: ${LNFILES} ${LINT} ${LINTKERNFLAGS} ${CFLAGS:M-[DILU]*} ${.ALLSRC} 2>&1 | \ tee -a linterrs # This is a hack. BFD "optimizes" away dynamic mode if there are no # dynamic references. We could probably do a '-Bforcedynamic' mode like # in the a.out ld. For now, this works. HACK_EXTRA_FLAGS?= -shared hack.So: Makefile :> hack.c ${CC} ${HACK_EXTRA_FLAGS} -nostdlib hack.c -o hack.So rm -f hack.c # This rule stops ./assym.s in .depend from causing problems. ./assym.s: assym.s assym.s: $S/kern/genassym.sh genassym.o NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh genassym.o > ${.TARGET} genassym.o: $S/$M/$M/genassym.c ${CC} -c ${CFLAGS:N-fno-common} $S/$M/$M/genassym.c ${SYSTEM_OBJS} genassym.o vers.o: opt_global.h # Normal files first CFILES_NORMAL= ${CFILES:N*/cddl/*:N*fs/nfsclient/nfs_clkdtrace*:N*/compat/linuxkpi/common/*:N*/ofed/*:N*/dev/mlx5/*} SFILES_NORMAL= ${SFILES:N*/cddl/*} # We have "special" -I include paths for zfs/dtrace files in 'depend'. CFILES_CDDL= ${CFILES:M*/cddl/*} SFILES_CDDL= ${SFILES:M*/cddl/*} # We have "special" -I include paths for LinuxKPI. CFILES_LINUXKPI=${CFILES:M*/compat/linuxkpi/common/*} # We have "special" -I include paths for OFED. CFILES_OFED=${CFILES:M*/ofed/*} # We have "special" -I include paths for MLX5. CFILES_MLX5=${CFILES:M*/dev/mlx5/*} # Skip reading .depend when not needed to speed up tree-walks # and simple lookups. .if !empty(.MAKEFLAGS:M-V${_V_READ_DEPEND}) || make(obj) || make(clean*) || \ make(install*) || make(kernel-obj) || make(kernel-clean*) || \ make(kernel-install*) _SKIP_READ_DEPEND= 1 .MAKE.DEPENDFILE= /dev/null .endif kernel-depend: .depend # The argument list can be very long, so use make -V and xargs to # pass it to mkdep. 
_MKDEPCC:= ${CC:N${CCACHE_BIN}} SRCS= assym.s vnode_if.h ${BEFORE_DEPEND} ${CFILES} \ ${SYSTEM_CFILES} ${GEN_CFILES} ${SFILES} \ ${MFILES:T:S/.m$/.h/} -DEPENDFILES= .depend +DEPENDFILES= .depend .depend.* .if ${MK_FAST_DEPEND} == "yes" && \ (${.MAKE.MODE:Unormal:Mmeta} == "" || ${.MAKE.MODE:Unormal:Mnofilemon} != "") -DEPENDFILES+= .depend.* DEPEND_CFLAGS+= -MD -MP -MF.depend.${.TARGET} DEPEND_CFLAGS+= -MT${.TARGET} .if defined(.PARSEDIR) # Only add in DEPEND_CFLAGS for CFLAGS on files we expect from DEPENDOBJS # as those are the only ones we will include. DEPEND_CFLAGS_CONDITION= !empty(DEPENDOBJS:M${.TARGET}) CFLAGS+= ${${DEPEND_CFLAGS_CONDITION}:?${DEPEND_CFLAGS}:} .else CFLAGS+= ${DEPEND_CFLAGS} .endif DEPENDOBJS+= ${SYSTEM_OBJS} genassym.o DEPENDFILES_OBJS= ${DEPENDOBJS:O:u:C/^/.depend./} .if !defined(_SKIP_READ_DEPEND) .for __depend_obj in ${DEPENDFILES_OBJS} .sinclude "${__depend_obj}" .endfor .endif .endif # ${MK_FAST_DEPEND} == "yes" .NOPATH: .depend ${DEPENDFILES_OBJS} .depend: .PRECIOUS ${SRCS} .if ${MK_FAST_DEPEND} == "no" rm -f ${.TARGET}.tmp # C files ${MAKE} -V CFILES_NORMAL -V SYSTEM_CFILES -V GEN_CFILES | \ CC="${_MKDEPCC}" xargs mkdep -a -f ${.TARGET}.tmp ${CFLAGS} ${MAKE} -V CFILES_CDDL | \ CC="${_MKDEPCC}" xargs mkdep -a -f ${.TARGET}.tmp ${ZFS_CFLAGS} \ ${FBT_CFLAGS} ${DTRACE_CFLAGS} ${MAKE} -V CFILES_LINUXKPI | \ CC="${_MKDEPCC}" xargs mkdep -a -f ${.TARGET}.tmp \ ${CFLAGS} ${LINUXKPI_INCLUDES} ${MAKE} -V CFILES_OFED -V CFILES_MLX5 | \ CC="${_MKDEPCC}" xargs mkdep -a -f ${.TARGET}.tmp \ ${CFLAGS} ${OFEDINCLUDES} # Assembly files ${MAKE} -V SFILES_NORMAL | \ CC="${_MKDEPCC}" xargs mkdep -a -f ${.TARGET}.tmp ${ASM_CFLAGS} ${MAKE} -V SFILES_CDDL | \ CC="${_MKDEPCC}" xargs mkdep -a -f ${.TARGET}.tmp ${ZFS_ASM_CFLAGS} mv ${.TARGET}.tmp ${.TARGET} .else : > ${.TARGET} .endif _ILINKS= machine .if ${MACHINE} != ${MACHINE_CPUARCH} && ${MACHINE} != "arm64" _ILINKS+= ${MACHINE_CPUARCH} .endif .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _ILINKS+= x86 .endif # Ensure that the link exists without depending on it when it exists. .for _link in ${_ILINKS} .if !exists(${.OBJDIR}/${_link}) ${SRCS} ${CLEAN:M*.o}: ${_link} .endif .endfor ${_ILINKS}: @case ${.TARGET} in \ machine) \ path=${S}/${MACHINE}/include ;; \ *) \ path=${S}/${.TARGET}/include ;; \ esac ; \ ${ECHO} ${.TARGET} "->" $$path ; \ ln -s $$path ${.TARGET} # .depend needs include links so we remove them only together. kernel-cleandepend: .PHONY rm -f ${DEPENDFILES} ${_ILINKS} kernel-tags: @[ -f .depend ] || { echo "you must make depend first"; exit 1; } sh $S/conf/systags.sh kernel-install: @if [ ! -f ${KERNEL_KO} ] ; then \ echo "You must build a kernel first." ; \ exit 1 ; \ fi .if exists(${DESTDIR}${KODIR}) -thiskernel=`sysctl -n kern.bootfile` ; \ if [ ! 
"`dirname "$$thiskernel"`" -ef ${DESTDIR}${KODIR} ] ; then \ chflags -R noschg ${DESTDIR}${KODIR} ; \ rm -rf ${DESTDIR}${KODIR} ; \ rm -rf ${DESTDIR}${KERN_DEBUGDIR}${KODIR} ; \ else \ if [ -d ${DESTDIR}${KODIR}.old ] ; then \ chflags -R noschg ${DESTDIR}${KODIR}.old ; \ rm -rf ${DESTDIR}${KODIR}.old ; \ fi ; \ mv ${DESTDIR}${KODIR} ${DESTDIR}${KODIR}.old ; \ if [ -n "${KERN_DEBUGDIR}" -a \ -d ${DESTDIR}${KERN_DEBUGDIR}${KODIR} ]; then \ rm -rf ${DESTDIR}${KERN_DEBUGDIR}${KODIR}.old ; \ mv ${DESTDIR}${KERN_DEBUGDIR}${KODIR} ${DESTDIR}${KERN_DEBUGDIR}${KODIR}.old ; \ fi ; \ sysctl kern.bootfile=${DESTDIR}${KODIR}.old/"`basename "$$thiskernel"`" ; \ fi .endif mkdir -p ${DESTDIR}${KODIR} ${INSTALL} -p -m 555 -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_KO} ${DESTDIR}${KODIR}/ .if defined(DEBUG) && !defined(INSTALL_NODEBUG) && ${MK_KERNEL_SYMBOLS} != "no" mkdir -p ${DESTDIR}${KERN_DEBUGDIR}${KODIR} ${INSTALL} -p -m 555 -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_KO}.debug ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/ .endif .if defined(KERNEL_EXTRA_INSTALL) ${INSTALL} -p -m 555 -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_EXTRA_INSTALL} ${DESTDIR}${KODIR}/ .endif kernel-reinstall: @-chflags -R noschg ${DESTDIR}${KODIR} ${INSTALL} -p -m 555 -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_KO} ${DESTDIR}${KODIR}/ .if defined(DEBUG) && !defined(INSTALL_NODEBUG) && ${MK_KERNEL_SYMBOLS} != "no" ${INSTALL} -p -m 555 -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_KO}.debug ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/ .endif config.o env.o hints.o vers.o vnode_if.o: ${NORMAL_C} ${NORMAL_CTFCONVERT} config.ln env.ln hints.ln vers.ln vnode_if.ln: ${NORMAL_LINT} vers.c: $S/conf/newvers.sh $S/sys/param.h ${SYSTEM_DEP} MAKE=${MAKE} sh $S/conf/newvers.sh ${KERN_IDENT} vnode_if.c: $S/tools/vnode_if.awk $S/kern/vnode_if.src ${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -c vnode_if.h vnode_if_newproto.h vnode_if_typedef.h: $S/tools/vnode_if.awk \ $S/kern/vnode_if.src vnode_if.h: vnode_if_newproto.h vnode_if_typedef.h ${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -h vnode_if_newproto.h: ${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -p vnode_if_typedef.h: ${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -q .if ${MFS_IMAGE:Uno} != "no" .if empty(MD_ROOT_SIZE_CONFIGURED) # Generate an object file from the file system image to embed in the kernel # via linking. Make sure the contents are in the mfs section and rename the # start/end/size variables to __start_mfs, __stop_mfs, and mfs_size, # respectively. embedfs_${MFS_IMAGE:T:R}.o: ${MFS_IMAGE} ${OBJCOPY} --input-target binary \ --output-target ${EMBEDFS_FORMAT.${MACHINE_ARCH}} \ --binary-architecture ${EMBEDFS_ARCH.${MACHINE_ARCH}} \ ${MFS_IMAGE} ${.TARGET} ${OBJCOPY} \ --rename-section .data=mfs,contents,alloc,load,readonly,data \ --redefine-sym \ _binary_${MFS_IMAGE:C,[^[:alnum:]],_,g}_size=__mfs_root_size \ --redefine-sym \ _binary_${MFS_IMAGE:C,[^[:alnum:]],_,g}_start=mfs_root \ --redefine-sym \ _binary_${MFS_IMAGE:C,[^[:alnum:]],_,g}_end=mfs_root_end \ ${.TARGET} .endif .endif # XXX strictly, everything depends on Makefile because changes to ${PROF} # only appear there, but we don't handle that. 
.include "kern.mk" Index: projects/release-pkg/sys/conf =================================================================== --- projects/release-pkg/sys/conf (revision 295925) +++ projects/release-pkg/sys/conf (revision 295926) Property changes on: projects/release-pkg/sys/conf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/conf:r295886-295925 Index: projects/release-pkg/sys/dev/e1000/if_em.c =================================================================== --- projects/release-pkg/sys/dev/e1000/if_em.c (revision 295925) +++ projects/release-pkg/sys/dev/e1000/if_em.c (revision 295926) @@ -1,6232 +1,6234 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "opt_em.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #ifdef DDB #include #include #endif #if __FreeBSD_version >= 800000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82571.h" #include "if_em.h" /********************************************************************* * Driver version: *********************************************************************/ char em_driver_version[] = "7.6.1-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static em_vendor_info_t em_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection */ { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, 
E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH2_LV_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH2_LV_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPT_I217_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_LM2, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_V2, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_LM3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_V3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ { 0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. 
*********************************************************************/ static char *em_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int em_probe(device_t); static int em_attach(device_t); static int em_detach(device_t); static int em_shutdown(device_t); static int em_suspend(device_t); static int em_resume(device_t); #ifdef EM_MULTIQUEUE static int em_mq_start(if_t, struct mbuf *); static int em_mq_start_locked(if_t, struct tx_ring *); static void em_qflush(if_t); #else static void em_start(if_t); static void em_start_locked(if_t, struct tx_ring *); #endif static int em_ioctl(if_t, u_long, caddr_t); static uint64_t em_get_counter(if_t, ift_counter); static void em_init(void *); static void em_init_locked(struct adapter *); static void em_stop(void *); static void em_media_status(if_t, struct ifmediareq *); static int em_media_change(if_t); static void em_identify_hardware(struct adapter *); static int em_allocate_pci_resources(struct adapter *); static int em_allocate_legacy(struct adapter *); static int em_allocate_msix(struct adapter *); static int em_allocate_queues(struct adapter *); static int em_setup_msix(struct adapter *); static void em_free_pci_resources(struct adapter *); static void em_local_timer(void *); static void em_reset(struct adapter *); static int em_setup_interface(device_t, struct adapter *); static void em_flush_desc_rings(struct adapter *); static void em_setup_transmit_structures(struct adapter *); static void em_initialize_transmit_unit(struct adapter *); static int em_allocate_transmit_buffers(struct tx_ring *); static void em_free_transmit_structures(struct adapter *); static void em_free_transmit_buffers(struct tx_ring *); static int em_setup_receive_structures(struct adapter *); static int em_allocate_receive_buffers(struct rx_ring *); static void em_initialize_receive_unit(struct adapter *); static void em_free_receive_structures(struct adapter *); static void em_free_receive_buffers(struct rx_ring *); static void em_enable_intr(struct adapter *); static void em_disable_intr(struct adapter *); static void em_update_stats_counters(struct adapter *); static void em_add_hw_stats(struct adapter *adapter); static void em_txeof(struct tx_ring *); static bool em_rxeof(struct rx_ring *, int, int *); #ifndef __NO_STRICT_ALIGNMENT static int em_fixup_rx(struct rx_ring *); #endif static void em_setup_rxdesc(union e1000_rx_desc_extended *, const struct em_rxbuffer *rxbuf); static void em_receive_checksum(uint32_t status, struct mbuf *); static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int, struct ip *, u32 *, u32 *); static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *, struct tcphdr *, u32 *, u32 *); static void em_set_promisc(struct adapter *); static void em_disable_promisc(struct adapter *); static void em_set_multi(struct adapter *); static void em_update_link_status(struct adapter *); static void em_refresh_mbufs(struct rx_ring *, int); static void em_register_vlan(void *, if_t, u16); static void em_unregister_vlan(void *, if_t, u16); static void em_setup_vlan_hw_support(struct adapter *); static int em_xmit(struct tx_ring *, struct mbuf **); static int em_dma_malloc(struct adapter *, bus_size_t, struct em_dma_alloc *, int); static void em_dma_free(struct adapter *, struct em_dma_alloc *); static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); 
static void em_print_nvm_info(struct adapter *); static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static void em_print_debug_info(struct adapter *); static int em_is_valid_ether_addr(u8 *); static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void em_add_int_delay_sysctl(struct adapter *, const char *, const char *, struct em_int_delay_info *, int, int); /* Management and WOL Support */ static void em_init_manageability(struct adapter *); static void em_release_manageability(struct adapter *); static void em_get_hw_control(struct adapter *); static void em_release_hw_control(struct adapter *); static void em_get_wakeup(device_t); static void em_enable_wakeup(device_t); static int em_enable_phy_wakeup(struct adapter *); static void em_led_func(void *, int); static void em_disable_aspm(struct adapter *); static int em_irq_fast(void *); /* MSIX handlers */ static void em_msix_tx(void *); static void em_msix_rx(void *); static void em_msix_link(void *); static void em_handle_tx(void *context, int pending); static void em_handle_rx(void *context, int pending); static void em_handle_link(void *context, int pending); #ifdef EM_MULTIQUEUE static void em_enable_vectors_82574(struct adapter *); #endif static void em_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int em_set_flowcntl(SYSCTL_HANDLER_ARGS); static int em_sysctl_eee(SYSCTL_HANDLER_ARGS); static __inline void em_rx_discard(struct rx_ring *, int); #ifdef DEVICE_POLLING static poll_handler_t em_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t em_methods[] = { /* Device interface */ DEVMETHOD(device_probe, em_probe), DEVMETHOD(device_attach, em_attach), DEVMETHOD(device_detach, em_detach), DEVMETHOD(device_shutdown, em_shutdown), DEVMETHOD(device_suspend, em_suspend), DEVMETHOD(device_resume, em_resume), DEVMETHOD_END }; static driver_t em_driver = { "em", em_methods, sizeof(struct adapter), }; devclass_t em_devclass; DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0); MODULE_DEPEND(em, pci, 1, 1, 1); MODULE_DEPEND(em, ether, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(em, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ /********************************************************************* * Tunable default values. 
*********************************************************************/ #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) #define M_TSO_LEN 66 #define MAX_INTS_PER_SEC 8000 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256)) /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif #define TSO_WORKAROUND 4 static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters"); static int em_disable_crc_stripping = 0; SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN, &em_disable_crc_stripping, 0, "Disable CRC Stripping"); static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt, 0, "Default transmit interrupt delay in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt, 0, "Default receive interrupt delay in usecs"); static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN, &em_tx_abs_int_delay_dflt, 0, "Default transmit interrupt delay limit in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN, &em_rx_abs_int_delay_dflt, 0, "Default receive interrupt delay limit in usecs"); static int em_rxd = EM_DEFAULT_RXD; static int em_txd = EM_DEFAULT_TXD; SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0, "Number of receive descriptors per queue"); SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0, "Number of transmit descriptors per queue"); static int em_smart_pwr_down = FALSE; SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down, 0, "Set to true to leave smart power down enabled on newer adapters"); /* Controls whether promiscuous also shows bad packets */ static int em_debug_sbp = FALSE; SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, "Show bad packets in promiscuous mode"); static int em_enable_msix = TRUE; SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0, "Enable MSI-X interrupts"); #ifdef EM_MULTIQUEUE static int em_num_queues = 1; SYSCTL_INT(_hw_em, OID_AUTO, num_queues, CTLFLAG_RDTUN, &em_num_queues, 0, "82574 only: Number of queues to configure, 0 indicates autoconfigure"); #endif /* ** Global variable to store last used CPU when binding queues ** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a ** queue is bound to a cpu. */ static int em_last_bind_cpu = -1; /* How many packets rxeof tries to clean at a time */ static int em_rx_process_limit = 100; SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &em_rx_process_limit, 0, "Maximum number of received packets to process " "at a time, -1 means unlimited"); /* Energy efficient ethernet - default to OFF */ static int eee_setting = 1; SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0, "Enable Energy Efficient Ethernet"); /* Global used in WOL setup with multiport cards */ static int global_quad_port_a = 0; #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * em_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. 
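The delay macros defined earlier in this file convert between microseconds and the adapter's interrupt-delay timer ticks of 1.024 us; the +500 and +512 terms round to the nearest unit, and DEFAULT_ITR works out to 488 because the ITR register appears to count in 256 ns steps. A small worked example, written as a standalone program for this note and not part of the driver:

/* Worked example for the conversion macros above; illustrative only. */
#include <stdio.h>

#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

int
main(void)
{
	printf("%d\n", EM_TICKS_TO_USECS(64));		/* 66: 64 ticks ~= 65.5 us */
	printf("%d\n", EM_USECS_TO_TICKS(66));		/* 64: rounds back to 64 ticks */
	printf("%d\n", 1000000000 / (8000 * 256));	/* 488 = DEFAULT_ITR */
	return (0);
}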
* * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int em_probe(device_t dev) { char adapter_name[60]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; uint16_t pci_subvendor_id = 0; uint16_t pci_subdevice_id = 0; em_vendor_info_t *ent; INIT_DEBUGOUT("em_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != EM_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = em_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s %s", em_strings[ent->index], em_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int em_attach(device_t dev) { struct adapter *adapter; struct e1000_hw *hw; int error = 0; INIT_DEBUGOUT("em_attach: begin"); if (resource_disabled("em", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; hw = &adapter->hw; EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_nvm_info, "I", "NVM Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_set_flowcntl, "I", "Flow Control"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); /* Determine hardware and mac info */ em_identify_hardware(adapter); /* Setup PCI resources */ if (em_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* ** For ICH8 and family we need to ** map the flash memory, and this ** must happen after the MAC is ** identified */ if ((hw->mac.type == e1000_ich8lan) || (hw->mac.type == e1000_ich9lan) || (hw->mac.type == e1000_ich10lan) || (hw->mac.type == e1000_pchlan) || (hw->mac.type == e1000_pch2lan) || (hw->mac.type == e1000_pch_lpt)) { int rid = EM_BAR_TYPE_FLASH; adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->flash == NULL) { device_printf(dev, "Mapping of Flash failed\n"); error = ENXIO; goto err_pci; } /* This is used in the shared code */ hw->flash_address = (u8 *)adapter->flash; adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash); adapter->osdep.flash_bus_space_handle = rman_get_bushandle(adapter->flash); } /* ** In the new SPT device flash is not a ** seperate BAR, 
rather it is also in BAR0, ** so use the same tag and an offset handle for the ** FLASH read/write macros in the shared code. */ else if (hw->mac.type == e1000_pch_spt) { adapter->osdep.flash_bus_space_tag = adapter->osdep.mem_bus_space_tag; adapter->osdep.flash_bus_space_handle = adapter->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR; } /* Do Shared Code initialization */ error = e1000_setup_init_funcs(hw, TRUE); if (error) { device_printf(dev, "Setup of Shared code failed, error %d\n", error); error = ENXIO; goto err_pci; } /* * Setup MSI/X or MSI if PCI Express */ adapter->msix = em_setup_msix(adapter); e1000_get_bus_info(hw); /* Set up some sysctls for the tunable interrupt delays */ em_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, E1000_REGISTER(hw, E1000_RADV), em_rx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt delay limit in usecs", &adapter->tx_abs_int_delay, E1000_REGISTER(hw, E1000_TADV), em_tx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "itr", "interrupt delay limit in usecs/4", &adapter->tx_itr, E1000_REGISTER(hw, E1000_ITR), DEFAULT_ITR); /* Sysctl for limiting the amount of work done in the taskqueue */ em_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, em_rx_process_limit); /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be multiple * of E1000_DBA_ALIGN. */ if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 || (em_txd > EM_MAX_TXD) || (em_txd < EM_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", EM_DEFAULT_TXD, em_txd); adapter->num_tx_desc = EM_DEFAULT_TXD; } else adapter->num_tx_desc = em_txd; if (((em_rxd * sizeof(union e1000_rx_desc_extended)) % EM_DBA_ALIGN) != 0 || (em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", EM_DEFAULT_RXD, em_rxd); adapter->num_rx_desc = EM_DEFAULT_RXD; } else adapter->num_rx_desc = em_rxd; hw->mac.autoneg = DO_AUTO_NEG; hw->phy.autoneg_wait_to_complete = FALSE; hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (hw->phy.media_type == e1000_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; hw->phy.disable_polarity_correction = FALSE; hw->phy.ms_type = EM_MASTER_SLAVE; } /* * Set the frame limits assuming * standard ethernet sized frames. */ adapter->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* * This controls when hardware reports transmit completion * status. */ hw->mac.report_tx_early = 1; /* ** Get queue/ring memory */ if (em_allocate_queues(adapter)) { error = ENOMEM; goto err_pci; } /* Allocate multicast array memory. 
*/ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Check SOL/IDER usage */ if (e1000_check_reset_block(hw)) device_printf(dev, "PHY reset is blocked" " due to SOL/IDER session.\n"); /* Sysctl for setting Energy Efficient Ethernet */ hw->dev_spec.ich8lan.eee_disable = eee_setting; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_control", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. */ e1000_reset_hw(hw); /* Make sure we have a good EEPROM before we read from it */ if (e1000_validate_nvm_checksum(hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. */ if (e1000_validate_nvm_checksum(hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } if (!em_is_valid_ether_addr(hw->mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* Disable ULP support */ e1000_disable_ulp_lpt_lp(hw, TRUE); /* ** Do interrupt configuration */ if (adapter->msix > 1) /* Do MSIX */ error = em_allocate_msix(adapter); else /* MSI or Legacy */ error = em_allocate_legacy(adapter); if (error) goto err_late; /* * Get Wake-on-Lan and Management info for later use */ em_get_wakeup(dev); /* Setup OS specific network interface */ if (em_setup_interface(dev, adapter) != 0) goto err_late; em_reset(adapter); /* Initialize statistics */ em_update_stats_counters(adapter); hw->mac.get_link_status = 1; em_update_link_status(adapter); /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); em_add_hw_stats(adapter); /* Non-AMT based hardware can now take control from firmware */ if (adapter->has_manage && !adapter->has_amt) em_get_hw_control(adapter); /* Tell the stack that the interface is not active */ if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); adapter->led_dev = led_create(em_led_func, adapter, device_get_nameunit(dev)); #ifdef DEV_NETMAP em_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("em_attach: end"); return (0); err_late: em_free_transmit_structures(adapter); em_free_receive_structures(adapter); em_release_hw_control(adapter); if (adapter->ifp != (void *)NULL) if_free(adapter->ifp); err_pci: em_free_pci_resources(adapter); free(adapter->mta, M_DEVBUF); EM_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
* * return 0 on success, positive on failure *********************************************************************/ static int em_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); if_t ifp = adapter->ifp; INIT_DEBUGOUT("em_detach: begin"); /* Make sure VLANS are not using driver */ if (if_vlantrunkinuse(ifp)) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (adapter->led_dev != NULL) led_destroy(adapter->led_dev); EM_CORE_LOCK(adapter); adapter->in_detach = 1; em_stop(adapter); EM_CORE_UNLOCK(adapter); EM_CORE_LOCK_DESTROY(adapter); e1000_phy_hw_reset(&adapter->hw); em_release_manageability(adapter); em_release_hw_control(adapter); /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); ether_ifdetach(adapter->ifp); callout_drain(&adapter->timer); #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ em_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); em_free_transmit_structures(adapter); em_free_receive_structures(adapter); em_release_hw_control(adapter); free(adapter->mta, M_DEVBUF); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int em_shutdown(device_t dev) { return em_suspend(dev); } /* * Suspend/resume device methods. */ static int em_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); EM_CORE_LOCK(adapter); em_release_manageability(adapter); em_release_hw_control(adapter); em_enable_wakeup(dev); EM_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int em_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct tx_ring *txr = adapter->tx_rings; if_t ifp = adapter->ifp; EM_CORE_LOCK(adapter); if (adapter->hw.mac.type == e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); em_init_locked(adapter); em_init_manageability(adapter); if ((if_getflags(ifp) & IFF_UP) && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!if_sendq_empty(ifp)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); } } EM_CORE_UNLOCK(adapter); return bus_generic_resume(dev); } #ifndef EM_MULTIQUEUE static void em_start_locked(if_t ifp, struct tx_ring *txr) { struct adapter *adapter = if_getsoftc(ifp); struct mbuf *m_head; EM_TX_LOCK_ASSERT(txr); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; while (!if_sendq_empty(ifp)) { /* Call cleanup if number of TX descriptors low */ if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD) em_txeof(txr); if (txr->tx_avail < EM_MAX_SCATTER) { if_setdrvflagbits(ifp,IFF_DRV_OACTIVE, 0); break; } m_head = if_dequeue(ifp); if (m_head == NULL) break; /* * Encapsulation can modify our pointer, and or make it * NULL on failure. In that event, we can't requeue. 
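* If em_xmit() fails below and leaves m_head valid, the frame is prepended back onto the send queue for a later attempt; if em_xmit() also cleared the pointer, the chain has already been freed and the loop simply stops.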
*/ if (em_xmit(txr, &m_head)) { if (m_head == NULL) break; if_sendq_prepend(ifp, m_head); break; } /* Mark the queue as having work */ if (txr->busy == EM_TX_IDLE) txr->busy = EM_TX_BUSY; /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); } return; } static void em_start(if_t ifp) { struct adapter *adapter = if_getsoftc(ifp); struct tx_ring *txr = adapter->tx_rings; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { EM_TX_LOCK(txr); em_start_locked(ifp, txr); EM_TX_UNLOCK(txr); } return; } #else /* EM_MULTIQUEUE */ /********************************************************************* * Multiqueue Transmit routines * * em_mq_start is called by the stack to initiate a transmit. * however, if busy the driver can queue the request rather * than do an immediate send. It is this that is an advantage * in this driver, rather than also having multiple tx queues. **********************************************************************/ /* ** Multiqueue capable stack interface */ static int em_mq_start(if_t ifp, struct mbuf *m) { struct adapter *adapter = if_getsoftc(ifp); struct tx_ring *txr = adapter->tx_rings; unsigned int i, error; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % adapter->num_queues; else i = curcpu % adapter->num_queues; txr = &adapter->tx_rings[i]; error = drbr_enqueue(ifp, txr->br, m); if (error) return (error); if (EM_TX_TRYLOCK(txr)) { em_mq_start_locked(ifp, txr); EM_TX_UNLOCK(txr); } else taskqueue_enqueue(txr->tq, &txr->tx_task); return (0); } static int em_mq_start_locked(if_t ifp, struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct mbuf *next; int err = 0, enq = 0; EM_TX_LOCK_ASSERT(txr); if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) || adapter->link_active == 0) { return (ENETDOWN); } /* Process the queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { if ((err = em_xmit(txr, &next)) != 0) { if (next == NULL) { /* It was freed, move forward */ drbr_advance(ifp, txr->br); } else { /* * Still have one left, it may not be * the same since the transmit function * may have changed it. */ drbr_putback(ifp, txr->br, next); } break; } drbr_advance(ifp, txr->br); enq++; if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); if (next->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); ETHER_BPF_MTAP(ifp, next); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) break; } /* Mark the queue as having work */ if ((enq > 0) && (txr->busy == EM_TX_IDLE)) txr->busy = EM_TX_BUSY; if (txr->tx_avail < EM_MAX_SCATTER) em_txeof(txr); if (txr->tx_avail < EM_MAX_SCATTER) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE,0); } return (err); } /* ** Flush all ring buffers */ static void em_qflush(if_t ifp) { struct adapter *adapter = if_getsoftc(ifp); struct tx_ring *txr = adapter->tx_rings; struct mbuf *m; for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); EM_TX_UNLOCK(txr); } if_qflush(ifp); } #endif /* EM_MULTIQUEUE */ /********************************************************************* * Ioctl entry point * * em_ioctl is called when the user wants to configure the * interface. 
* * return 0 on success, positive on failure **********************************************************************/ static int em_ioctl(if_t ifp, u_long command, caddr_t data) { struct adapter *adapter = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { if_setflagbits(ifp,IFF_UP,0); if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING)) em_init(adapter); #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); EM_CORE_LOCK(adapter); switch (adapter->hw.mac.type) { case e1000_82571: case e1000_82572: case e1000_ich9lan: case e1000_ich10lan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_82574: case e1000_82583: case e1000_80003es2lan: /* 9K Jumbo Frame size */ max_frame_size = 9234; break; case e1000_pchlan: max_frame_size = 4096; break; /* Adapters that do not support jumbo frames */ case e1000_ich8lan: max_frame_size = ETHER_MAX_LEN; break; default: max_frame_size = MAX_JUMBO_FRAME_SIZE; } if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { EM_CORE_UNLOCK(adapter); error = EINVAL; break; } if_setmtu(ifp, ifr->ifr_mtu); adapter->hw.mac.max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; em_init_locked(adapter); EM_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); EM_CORE_LOCK(adapter); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { em_disable_promisc(adapter); em_set_promisc(adapter); } } else em_init_locked(adapter); } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) em_stop(adapter); adapter->if_flags = if_getflags(ifp); EM_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { EM_CORE_LOCK(adapter); em_disable_intr(adapter); em_set_multi(adapter); #ifdef DEVICE_POLLING if (!(if_getcapenable(ifp) & IFCAP_POLLING)) #endif em_enable_intr(adapter); EM_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ EM_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { EM_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } EM_CORE_UNLOCK(adapter); /* falls thru */ case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(em_poll, ifp); if (error) return (error); EM_CORE_LOCK(adapter); em_disable_intr(adapter); if_setcapenablebit(ifp, IFCAP_POLLING, 0); 
EM_CORE_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ EM_CORE_LOCK(adapter); em_enable_intr(adapter); if_setcapenablebit(ifp, 0, IFCAP_POLLING); EM_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { if_togglecapenable(ifp,IFCAP_HWCSUM); reinit = 1; } if (mask & IFCAP_TSO4) { if_togglecapenable(ifp,IFCAP_TSO4); reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { if_togglecapenable(ifp,IFCAP_VLAN_HWTAGGING); reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER); reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); reinit = 1; } if ((mask & IFCAP_WOL) && (if_getcapabilities(ifp) & IFCAP_WOL) != 0) { if (mask & IFCAP_WOL_MCAST) if_togglecapenable(ifp, IFCAP_WOL_MCAST); if (mask & IFCAP_WOL_MAGIC) if_togglecapenable(ifp, IFCAP_WOL_MAGIC); } if (reinit && (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) em_init(adapter); if_vlancap(ifp); break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void em_init_locked(struct adapter *adapter) { if_t ifp = adapter->ifp; device_t dev = adapter->dev; INIT_DEBUGOUT("em_init: begin"); EM_CORE_LOCK_ASSERT(adapter); em_disable_intr(adapter); callout_stop(&adapter->timer); /* Get the latest mac address, User can use a LAA */ bcopy(if_getlladdr(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* * With the 82571 adapter, RAR[0] may be overwritten * when the other port is reset, we make a duplicate * in RAR[14] for that eventuality, this assures * the interface continues to function. */ if (adapter->hw.mac.type == e1000_82571) { e1000_set_laa_state_82571(&adapter->hw, TRUE); e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, E1000_RAR_ENTRIES - 1); } /* Initialize the hardware */ em_reset(adapter); em_update_link_status(adapter); /* Setup VLAN support, basic and offload if available */ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); /* ** There have proven to be problems with TSO when not ** at full gigabit speed, so disable the assist automatically ** when at lower speeds. 
-jfv */ if (if_getcapenable(ifp) & IFCAP_TSO4) { if (adapter->link_speed == SPEED_1000) if_sethwassistbits(ifp, CSUM_TSO, 0); } /* Configure for OS presence */ em_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ em_setup_transmit_structures(adapter); em_initialize_transmit_unit(adapter); /* Setup Multicast table */ em_set_multi(adapter); /* ** Figure out the desired mbuf ** pool for doing jumbos */ if (adapter->hw.mac.max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->hw.mac.max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; /* Prepare receive descriptors and buffers */ if (em_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); em_stop(adapter); return; } em_initialize_receive_unit(adapter); /* Use real VLAN Filter support? */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) /* Use real VLAN Filter support */ em_setup_vlan_hw_support(adapter); else { u32 ctrl; ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); } } /* Don't lose promiscuous settings */ em_set_promisc(adapter); /* Set the interface as ACTIVE */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); callout_reset(&adapter->timer, hz, em_local_timer, adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); /* MSI/X configuration for 82574 */ if (adapter->hw.mac.type == e1000_82574) { int tmp; tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp); /* Set the IVAR - interrupt vector routing. */ E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars); } #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. 
*/ if (if_getcapenable(ifp) & IFCAP_POLLING) em_disable_intr(adapter); else #endif /* DEVICE_POLLING */ em_enable_intr(adapter); /* AMT based hardware can now take control from firmware */ if (adapter->has_manage && adapter->has_amt) em_get_hw_control(adapter); } static void em_init(void *arg) { struct adapter *adapter = arg; EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } #ifdef DEVICE_POLLING /********************************************************************* * * Legacy polling routine: note this only works with single queue * *********************************************************************/ static int em_poll(if_t ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = if_getsoftc(ifp); struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; u32 reg_icr; int rx_done; EM_CORE_LOCK(adapter); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { EM_CORE_UNLOCK(adapter); return (0); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.mac.get_link_status = 1; em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } } EM_CORE_UNLOCK(adapter); em_rxeof(rxr, count, &rx_done); EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!if_sendq_empty(ifp)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); return (rx_done); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ static int em_irq_fast(void *arg) { struct adapter *adapter = arg; if_t ifp; u32 reg_icr; ifp = adapter->ifp; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; /* * Starting with the 82571 chip, bit 31 should be used to * determine whether the interrupt belongs to us. 
*/ if (adapter->hw.mac.type >= e1000_82571 && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; em_disable_intr(adapter); taskqueue_enqueue(adapter->tq, &adapter->que_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; taskqueue_enqueue(taskqueue_fast, &adapter->link_task); } if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } /* Combined RX/TX handler, used by Legacy and MSI */ static void em_handle_que(void *context, int pending) { struct adapter *adapter = context; if_t ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL); EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!if_sendq_empty(ifp)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(adapter->tq, &adapter->que_task); return; } } em_enable_intr(adapter); return; } /********************************************************************* * * MSIX Interrupt Service Routines * **********************************************************************/ static void em_msix_tx(void *arg) { struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; if_t ifp = adapter->ifp; ++txr->tx_irq; EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!if_sendq_empty(ifp)) em_start_locked(ifp, txr); #endif /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); EM_TX_UNLOCK(txr); return; } /********************************************************************* * * MSIX RX Interrupt Service routine * **********************************************************************/ static void em_msix_rx(void *arg) { struct rx_ring *rxr = arg; struct adapter *adapter = rxr->adapter; bool more; ++rxr->rx_irq; if (!(if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING)) return; more = em_rxeof(rxr, adapter->rx_process_limit, NULL); if (more) taskqueue_enqueue(rxr->tq, &rxr->rx_task); else { /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); } return; } /********************************************************************* * * MSIX Link Fast Interrupt Service routine * **********************************************************************/ static void em_msix_link(void *arg) { struct adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; em_handle_link(adapter, 0); } else E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); /* ** Because we must read the ICR for this interrupt ** it may clear other causes using autoclear, for ** this reason we simply create a soft interrupt ** for all these vectors. 
*/ if (reg_icr) { E1000_WRITE_REG(&adapter->hw, E1000_ICS, adapter->ims); } return; } static void em_handle_rx(void *context, int pending) { struct rx_ring *rxr = context; struct adapter *adapter = rxr->adapter; bool more; more = em_rxeof(rxr, adapter->rx_process_limit, NULL); if (more) taskqueue_enqueue(rxr->tq, &rxr->rx_task); else { /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); } } static void em_handle_tx(void *context, int pending) { struct tx_ring *txr = context; struct adapter *adapter = txr->adapter; if_t ifp = adapter->ifp; EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!if_sendq_empty(ifp)) em_start_locked(ifp, txr); #endif E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); EM_TX_UNLOCK(txr); } static void em_handle_link(void *context, int pending) { struct adapter *adapter = context; struct tx_ring *txr = adapter->tx_rings; if_t ifp = adapter->ifp; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) return; EM_CORE_LOCK(adapter); callout_stop(&adapter->timer); em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); if (adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (if_sendq_empty(ifp)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); } } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void em_media_status(if_t ifp, struct ifmediareq *ifmr) { struct adapter *adapter = if_getsoftc(ifp); u_char fiber_type = IFM_1000_SX; INIT_DEBUGOUT("em_media_status: begin"); EM_CORE_LOCK(adapter); em_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { EM_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { ifmr->ifm_active |= fiber_type | IFM_FDX; } else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. 
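* For example, "ifconfig em0 media 100baseTX mediaopt full-duplex" should land in the IFM_100_TX case below with full duplex forced, while "media autoselect" restores autonegotiation.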
* **********************************************************************/ static int em_media_change(if_t ifp) { struct adapter *adapter = if_getsoftc(ifp); struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("em_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); EM_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } em_init_locked(adapter); EM_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int em_xmit(struct tx_ring *txr, struct mbuf **m_headp) { struct adapter *adapter = txr->adapter; bus_dma_segment_t segs[EM_MAX_SCATTER]; bus_dmamap_t map; struct em_txbuffer *tx_buffer, *tx_buffer_mapped; struct e1000_tx_desc *ctxd = NULL; struct mbuf *m_head; struct ether_header *eh; struct ip *ip = NULL; struct tcphdr *tp = NULL; u32 txd_upper = 0, txd_lower = 0; int ip_off, poff; int nsegs, i, j, first, last = 0; int error; bool do_tso, tso_desc, remap = TRUE; m_head = *m_headp; do_tso = (m_head->m_pkthdr.csum_flags & CSUM_TSO); tso_desc = FALSE; ip_off = poff = 0; /* * Intel recommends that the entire IP/TCP header length reside in a single * buffer. If multiple descriptors are used to describe the IP and * TCP header, each descriptor should describe one or more * complete headers; descriptors referencing only parts of headers * are not supported. If all layer headers are not coalesced into * a single buffer, each buffer should not cross a 4KB boundary, * or be larger than the maximum read request size. * The controller also requires modifying the IP/TCP header to make TSO work, * so we first get a writable mbuf chain and then coalesce the Ethernet/ * IP/TCP header into a single buffer to meet the controller's requirement. * This also simplifies IP/TCP/UDP checksum offloading, * which has similar restrictions. */ if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { if (do_tso || (m_head->m_next != NULL && m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) { if (M_WRITABLE(*m_headp) == 0) { m_head = m_dup(*m_headp, M_NOWAIT); m_freem(*m_headp); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } *m_headp = m_head; } } /* * XXX * Assume IPv4, we don't have TSO/checksum offload support * for IPv6 yet.
ip_off = sizeof(struct ether_header); if (m_head->m_len < ip_off) { m_head = m_pullup(m_head, ip_off); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } eh = mtod(m_head, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); if (m_head->m_len < ip_off) { m_head = m_pullup(m_head, ip_off); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } } if (m_head->m_len < ip_off + sizeof(struct ip)) { m_head = m_pullup(m_head, ip_off + sizeof(struct ip)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } ip = (struct ip *)(mtod(m_head, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); if (do_tso || (m_head->m_pkthdr.csum_flags & CSUM_TCP)) { if (m_head->m_len < poff + sizeof(struct tcphdr)) { m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } tp = (struct tcphdr *)(mtod(m_head, char *) + poff); /* * TSO workaround: * pull up the full TCP header plus 4 more bytes of data. */ if (m_head->m_len < poff + (tp->th_off << 2)) { m_head = m_pullup(m_head, poff + (tp->th_off << 2) + TSO_WORKAROUND); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } ip = (struct ip *)(mtod(m_head, char *) + ip_off); tp = (struct tcphdr *)(mtod(m_head, char *) + poff); if (do_tso) { ip->ip_len = htons(m_head->m_pkthdr.tso_segsz + (ip->ip_hl << 2) + (tp->th_off << 2)); ip->ip_sum = 0; /* * The pseudo TCP checksum does not include the TCP * payload length, so the driver must recompute * the checksum here as the hardware expects to * see it. This follows Microsoft's Large * Send specification. */ tp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { if (m_head->m_len < poff + sizeof(struct udphdr)) { m_head = m_pullup(m_head, poff + sizeof(struct udphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } ip = (struct ip *)(mtod(m_head, char *) + ip_off); } *m_headp = m_head; } /* * Map the packet for DMA * * Capture the first descriptor index, * this descriptor will have the index * of the EOP which is the only one that * now gets a DONE bit writeback. */ first = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[first]; tx_buffer_mapped = tx_buffer; map = tx_buffer->map; retry: error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); /* * There are two types of errors we can (try to) handle: * - EFBIG means the mbuf chain was too long and bus_dma ran * out of segments. Defragment the mbuf chain and try again. * - ENOMEM means bus_dma could not obtain enough bounce buffers * at this point in time. Defer sending and try again later. * All other errors, in particular EINVAL, are fatal and prevent the * mbuf chain from ever going through. Drop it and report the error. */ if (error == EFBIG && remap) { struct mbuf *m; m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER); if (m == NULL) { adapter->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again, but only once */ remap = FALSE; goto retry; } else if (error != 0) { adapter->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } /* * TSO Hardware workaround, if this packet is not * TSO, and is only a single descriptor long, and * it follows a TSO burst, then we need to add a * sentinel descriptor to prevent premature writeback.
*/ if ((!do_tso) && (txr->tx_tso == TRUE)) { if (nsegs == 1) tso_desc = TRUE; txr->tx_tso = FALSE; } if (nsegs > (txr->tx_avail - EM_MAX_SCATTER)) { txr->no_desc_avail++; bus_dmamap_unload(txr->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* Do hardware assists */ if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { em_tso_setup(txr, m_head, ip_off, ip, tp, &txd_upper, &txd_lower); /* we need to make a final sentinel transmit desc */ tso_desc = TRUE; } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) em_transmit_checksum_setup(txr, m_head, ip_off, ip, &txd_upper, &txd_lower); if (m_head->m_flags & M_VLANTAG) { /* Set the vlan id. */ txd_upper |= htole16(if_getvtag(m_head)) << 16; /* Tell hardware to add tag */ txd_lower |= htole32(E1000_TXD_CMD_VLE); } i = txr->next_avail_desc; /* Set up our transmit descriptors */ for (j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; tx_buffer = &txr->tx_buffers[i]; ctxd = &txr->tx_base[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; /* ** TSO Workaround: ** If this is the last descriptor, we want to ** split it so we have a small final sentinel */ if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) { seg_len -= TSO_WORKAROUND; ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); if (++i == adapter->num_tx_desc) i = 0; /* Now make the sentinel */ txr->tx_avail--; ctxd = &txr->tx_base[i]; tx_buffer = &txr->tx_buffers[i]; ctxd->buffer_addr = htole64(seg_addr + seg_len); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | TSO_WORKAROUND); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } else { ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; } txr->next_avail_desc = i; txr->tx_avail -= nsegs; tx_buffer->m_head = m_head; /* ** Here we swap the map so the last descriptor, ** which gets the completion interrupt has the ** real map, and the first descriptor gets the ** unused map from this descriptor. */ tx_buffer_mapped->map = tx_buffer->map; tx_buffer->map = map; bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); /* * Keep track in the first buffer which * descriptor will be written back */ tx_buffer = &txr->tx_buffers[first]; tx_buffer->next_eop = last; /* * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 * that this frame is available to transmit. 
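* At this point 'first' indexes the first descriptor of the frame, 'last' indexes the EOP/RS descriptor that will be written back on completion, and i has already wrapped modulo num_tx_desc to the next free slot; i is the value handed to TDT below.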
*/ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); return (0); } static void em_set_promisc(struct adapter *adapter) { if_t ifp = adapter->ifp; u32 reg_rctl; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (if_getflags(ifp) & IFF_PROMISC) { reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (em_debug_sbp) reg_rctl |= E1000_RCTL_SBP; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else if (if_getflags(ifp) & IFF_ALLMULTI) { reg_rctl |= E1000_RCTL_MPE; reg_rctl &= ~E1000_RCTL_UPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } } static void em_disable_promisc(struct adapter *adapter) { if_t ifp = adapter->ifp; u32 reg_rctl; int mcnt = 0; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= (~E1000_RCTL_UPE); if (if_getflags(ifp) & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES); /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg_rctl &= (~E1000_RCTL_MPE); reg_rctl &= (~E1000_RCTL_SBP); E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void em_set_multi(struct adapter *adapter) { if_t ifp = adapter->ifp; u32 reg_rctl = 0; u8 *mta; /* Multicast array memory */ int mcnt = 0; IOCTL_DEBUGOUT("em_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_clear_mwi(&adapter->hw); reg_rctl |= E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); } if_multiaddr_array(ifp, mta, &mcnt, MAX_NUM_MULTICAST_ADDRESSES); if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= ~E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(&adapter->hw); } } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. 
* **********************************************************************/ static void em_local_timer(void *arg) { struct adapter *adapter = arg; if_t ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; u32 trigger = 0; EM_CORE_LOCK_ASSERT(adapter); em_update_link_status(adapter); em_update_stats_counters(adapter); /* Reset LAA into RAR[0] on 82571 */ if ((adapter->hw.mac.type == e1000_82571) && e1000_get_laa_state_82571(&adapter->hw)) e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* Mask to use in the irq trigger */ if (adapter->msix_mem) { for (int i = 0; i < adapter->num_queues; i++, rxr++) trigger |= rxr->ims; rxr = adapter->rx_rings; } else trigger = E1000_ICS_RXDMT0; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { if (txr->busy == EM_TX_HUNG) goto hung; if (txr->busy >= EM_TX_MAXTRIES) txr->busy = EM_TX_HUNG; /* Schedule a TX tasklet if needed */ if (txr->tx_avail <= EM_MAX_SCATTER) taskqueue_enqueue(txr->tq, &txr->tx_task); } callout_reset(&adapter->timer, hz, em_local_timer, adapter); #ifndef DEVICE_POLLING /* Trigger an RX interrupt to guarantee mbuf refresh */ E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger); #endif return; hung: /* Looks like we're hung */ device_printf(adapter->dev, "Watchdog timeout Queue[%d]-- resetting\n", txr->me); em_print_debug_info(adapter); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); adapter->watchdog_events++; em_init_locked(adapter); } static void em_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if_t ifp = adapter->ifp; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; u32 link_check = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { if (hw->mac.type == e1000_pch_spt) msec_delay(50); /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; if (link_check) /* ESB2 fix */ e1000_cfg_on_link_up(hw); } else link_check = TRUE; break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; default: case e1000_media_type_unknown: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); /* Check if we must disable SPEED_MODE bit on PCI-E */ if ((adapter->link_speed != SPEED_1000) && ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572))) { int tarc0; tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); tarc0 &= ~TARC_SPEED_MODE_BIT; E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); } if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? 
"Full Duplex" : "Half Duplex")); adapter->link_active = 1; adapter->smartspeed = 0; if_setbaudrate(ifp, adapter->link_speed * 1000000); if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { if_setbaudrate(ifp, 0); adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); adapter->link_active = 0; /* Link down, disable hang detection */ for (int i = 0; i < adapter->num_queues; i++, txr++) txr->busy = EM_TX_IDLE; if_link_state_change(ifp, LINK_STATE_DOWN); } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * * This routine should always be called with BOTH the CORE * and TX locks. **********************************************************************/ static void em_stop(void *arg) { struct adapter *adapter = arg; if_t ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; EM_CORE_LOCK_ASSERT(adapter); INIT_DEBUGOUT("em_stop: begin"); em_disable_intr(adapter); callout_stop(&adapter->timer); /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); /* Disarm Hang Detection. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); txr->busy = EM_TX_IDLE; EM_TX_UNLOCK(txr); } /* I219 needs some special flushing to avoid hangs */ if (adapter->hw.mac.type == e1000_pch_spt) em_flush_desc_rings(adapter); e1000_reset_hw(&adapter->hw); E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. 
* **********************************************************************/ static void em_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ pci_enable_busmaster(dev); adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (e1000_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int em_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int rid; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ int em_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; int error, rid = 0; /* Manually turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); if (adapter->msix == 1) /* using MSI */ rid = 1; /* We allocate a single interrupt resource */ adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } /* * Allocate a fast interrupt and the associated * deferred processing contexts. */ TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter); adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT, taskqueue_thread_enqueue, &adapter->tq); taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s que", device_get_nameunit(adapter->dev)); /* Use a TX only tasklet for local timer */ TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, taskqueue_thread_enqueue, &txr->tq); taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq", device_get_nameunit(adapter->dev)); TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter); if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET, em_irq_fast, NULL, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(adapter->tq); adapter->tq = NULL; return (error); } return (0); } /********************************************************************* * * Setup the MSIX Interrupt handlers * This is not really Multiqueue, rather * its just seperate interrupt vectors * for TX, RX, and Link. 
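* In the default single-queue configuration this uses three vectors: 0 for RX, 1 for TX and 2 for the link interrupt, each allocated with SYS_RES_IRQ rid = vector + 1.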
* **********************************************************************/ int em_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; int error, rid, vector = 0; int cpu_id = 0; /* Make sure all interrupts are disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* First set up ring resources */ for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) { /* RX ring */ rid = vector + 1; rxr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (rxr->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "RX MSIX Interrupt %d\n", i); return (ENXIO); } if ((error = bus_setup_intr(dev, rxr->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, rxr, &rxr->tag)) != 0) { device_printf(dev, "Failed to register RX handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i); #endif rxr->msix = vector; if (em_last_bind_cpu < 0) em_last_bind_cpu = CPU_FIRST(); cpu_id = em_last_bind_cpu; bus_bind_intr(dev, rxr->res, cpu_id); TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr); rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT, taskqueue_thread_enqueue, &rxr->tq); taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)", device_get_nameunit(adapter->dev), cpu_id); /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 20 and 21 ** are for RX0 and RX1, note this has ** NOTHING to do with the MSIX vector */ rxr->ims = 1 << (20 + i); adapter->ims |= rxr->ims; adapter->ivars |= (8 | rxr->msix) << (i * 4); em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu); } for (int i = 0; i < adapter->num_queues; i++, txr++, vector++) { /* TX ring */ rid = vector + 1; txr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (txr->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "TX MSIX Interrupt %d\n", i); return (ENXIO); } if ((error = bus_setup_intr(dev, txr->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, txr, &txr->tag)) != 0) { device_printf(dev, "Failed to register TX handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, txr->res, txr->tag, "tx%d", i); #endif txr->msix = vector; if (em_last_bind_cpu < 0) em_last_bind_cpu = CPU_FIRST(); cpu_id = em_last_bind_cpu; bus_bind_intr(dev, txr->res, cpu_id); TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, taskqueue_thread_enqueue, &txr->tq); taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq (cpuid %d)", device_get_nameunit(adapter->dev), cpu_id); /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 22 and 23 ** are for TX0 and TX1, note this has ** NOTHING to do with the MSIX vector */ txr->ims = 1 << (22 + i); adapter->ims |= txr->ims; adapter->ivars |= (8 | txr->msix) << (8 + (i * 4)); em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu); } /* Link interrupt */ rid = vector + 1; adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!adapter->res) { device_printf(dev,"Unable to allocate " "bus resource: Link interrupt [%d]\n", rid); return (ENXIO); } /* Set the link handler function */ error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter, &adapter->tag); if (error) { adapter->res = NULL; device_printf(dev, "Failed to register LINK handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif 
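/* ** For reference, assuming the two-queue 82574 case the loops above assign ** MSIX vectors 0-1 to RX and 2-3 to TX, so the ivars word accumulated here is ** (8|0) | (8|1)<<4 | (8|2)<<8 | (8|3)<<12; the link entry and the 0x80000000 ** bit are OR'd in just below, giving 0x800CBA98. The 8 in each 4-bit field ** appears to be a per-entry enable bit alongside the 3-bit vector number. */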
adapter->linkvec = vector; adapter->ivars |= (8 | vector) << 16; adapter->ivars |= 0x80000000; return (0); } static void em_free_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr; struct rx_ring *rxr; int rid; /* ** Release all the queue interrupt resources: */ for (int i = 0; i < adapter->num_queues; i++) { txr = &adapter->tx_rings[i]; /* an early abort? */ if (txr == NULL) break; rid = txr->msix +1; if (txr->tag != NULL) { bus_teardown_intr(dev, txr->res, txr->tag); txr->tag = NULL; } if (txr->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res); rxr = &adapter->rx_rings[i]; /* an early abort? */ if (rxr == NULL) break; rid = rxr->msix +1; if (rxr->tag != NULL) { bus_teardown_intr(dev, rxr->res, rxr->tag); rxr->tag = NULL; } if (rxr->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res); } if (adapter->linkvec) /* we are doing MSIX */ rid = adapter->linkvec + 1; else (adapter->msix != 0) ? (rid = 1):(rid = 0); if (adapter->tag != NULL) { bus_teardown_intr(dev, adapter->res, adapter->tag); adapter->tag = NULL; } if (adapter->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); if (adapter->msix) pci_release_msi(dev); if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); if (adapter->memory != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->memory); if (adapter->flash != NULL) bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH, adapter->flash); } /* * Setup MSI or MSI/X */ static int em_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int val; /* Nearly always going to use one queue */ adapter->num_queues = 1; /* ** Try using MSI-X for Hartwell adapters */ if ((adapter->hw.mac.type == e1000_82574) && (em_enable_msix == TRUE)) { #ifdef EM_MULTIQUEUE adapter->num_queues = (em_num_queues == 1) ? 1 : 2; if (adapter->num_queues > 1) em_enable_vectors_82574(adapter); #endif /* Map the MSIX BAR */ int rid = PCIR_BAR(EM_MSIX_BAR); adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->msix_mem == NULL) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table \n"); goto msi; } val = pci_msix_count(dev); #ifdef EM_MULTIQUEUE /* We need 5 vectors in the multiqueue case */ if (adapter->num_queues > 1 ) { if (val >= 5) val = 5; else { adapter->num_queues = 1; device_printf(adapter->dev, "Insufficient MSIX vectors for >1 queue, " "using single queue...\n"); goto msix_one; } } else { msix_one: #endif if (val >= 3) val = 3; else { device_printf(adapter->dev, "Insufficient MSIX vectors, using MSI\n"); goto msi; } #ifdef EM_MULTIQUEUE } #endif if ((pci_alloc_msix(dev, &val) == 0)) { device_printf(adapter->dev, "Using MSIX interrupts " "with %d vectors\n", val); return (val); } /* ** If MSIX alloc failed or provided us with ** less than needed, free and fall through to MSI */ pci_release_msi(dev); } msi: if (adapter->msix_mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); adapter->msix_mem = NULL; } val = 1; if (pci_alloc_msi(dev, &val) == 0) { device_printf(adapter->dev, "Using an MSI interrupt\n"); return (val); } /* Should only happen due to manual configuration */ device_printf(adapter->dev,"No MSI/MSIX using a Legacy IRQ\n"); return (0); } /* ** The 3 following flush routines are used as a workaround in the ** I219 client parts and only for them. 
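**
** In outline (see em_flush_desc_rings() below): the MULR fix is
** first disabled through FEXTNVM11, then the FLUSH_DESC_REQUIRED
** bit of the PCICFG_DESC_RING_STATUS config word is checked; if it
** is set and TDLEN(0) is non-zero, the TX ring is flushed with a
** single dummy descriptor, the status word is re-read, and the RX
** ring is flushed as well if the condition persists.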
** ** em_flush_tx_ring - remove all descriptors from the tx_ring ** ** We want to clear all pending descriptors from the TX ring. ** zeroing happens when the HW reads the regs. We assign the ring itself as ** the data of the next descriptor. We don't care about the data we are about ** to reset the HW. */ static void em_flush_tx_ring(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct tx_ring *txr = adapter->tx_rings; struct e1000_tx_desc *txd; u32 tctl, txd_lower = E1000_TXD_CMD_IFCS; u16 size = 512; tctl = E1000_READ_REG(hw, E1000_TCTL); E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN); txd = &txr->tx_base[txr->next_avail_desc++]; if (txr->next_avail_desc == adapter->num_tx_desc) txr->next_avail_desc = 0; /* Just use the ring as a dummy buffer addr */ txd->buffer_addr = txr->txdma.dma_paddr; txd->lower.data = htole32(txd_lower | size); txd->upper.data = 0; /* flush descriptors to memory before notifying the HW */ wmb(); E1000_WRITE_REG(hw, E1000_TDT(0), txr->next_avail_desc); mb(); usec_delay(250); } /* ** em_flush_rx_ring - remove all descriptors from the rx_ring ** ** Mark all descriptors in the RX ring as consumed and disable the rx ring */ static void em_flush_rx_ring(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl, rxdctl; rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); E1000_WRITE_FLUSH(hw); usec_delay(150); rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); /* zero the lower 14 bits (prefetch and host thresholds) */ rxdctl &= 0xffffc000; /* * update thresholds: prefetch threshold to 31, host threshold to 1 * and make sure the granularity is "descriptors" and not "cache lines" */ rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl); /* momentarily enable the RX ring for the changes to take effect */ E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN); E1000_WRITE_FLUSH(hw); usec_delay(150); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); } /* ** em_flush_desc_rings - remove all descriptors from the descriptor rings ** ** In i219, the descriptor rings must be emptied before resetting the HW ** or before changing the device state to D3 during runtime (runtime PM). ** ** Failure to do this will cause the HW to enter a unit hang state which can ** only be released by PCI reset on the device ** */ static void em_flush_desc_rings(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; device_t dev = adapter->dev; u16 hang_state; u32 fext_nvm11, tdlen; /* First, disable MULR fix in FEXTNVM11 */ fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); /* do nothing if we're not in faulty state, or if the queue is empty */ tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2); if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) return; em_flush_tx_ring(adapter); /* recheck, maybe the fault is caused by the rx ring */ hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2); if (hang_state & FLUSH_DESC_REQUIRED) em_flush_rx_ring(adapter); } /********************************************************************* * * Initialize the hardware to a configuration * as specified by the adapter structure. 
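 *
 * As an illustrative example of the flow control watermark math
 * further down (assuming an 82574, whose PBA value of 20 gives a
 * 20 KB receive packet buffer, and a 1522 byte maximum frame):
 *	rx_buffer_size = 20 << 10                      = 20480
 *	high_water     = 20480 - roundup2(1522, 1024)  = 18432
 *	low_water      = 18432 - 1500                  = 16932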
* **********************************************************************/ static void em_reset(struct adapter *adapter) { device_t dev = adapter->dev; if_t ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u16 rx_buffer_size; u32 pba; INIT_DEBUGOUT("em_reset: begin"); /* Set up smart power down as default off on newer adapters. */ if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572)) { u16 phy_tmp = 0; /* Speed up time to link by disabling smart power down. */ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp); phy_tmp &= ~IGP02E1000_PM_SPD; e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp); } /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. */ switch (hw->mac.type) { /* Total Packet Buffer on these is 48K */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ break; case e1000_82573: /* 82573: Total Packet Buffer is 32K */ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ break; case e1000_82574: case e1000_82583: pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ break; case e1000_ich8lan: pba = E1000_PBA_8K; break; case e1000_ich9lan: case e1000_ich10lan: /* Boost Receive side for jumbo frames */ if (adapter->hw.mac.max_frame_size > 4096) pba = E1000_PBA_14K; else pba = E1000_PBA_10K; break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: pba = E1000_PBA_26K; break; default: if (adapter->hw.mac.max_frame_size > 8192) pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ else pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ } E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total number reduces the buffer * by 1500. * - The pause time is fairly large at 1000 x 512ns = 512 usec. */ rx_buffer_size = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10 ); hw->fc.high_water = rx_buffer_size - roundup2(adapter->hw.mac.max_frame_size, 1024); hw->fc.low_water = hw->fc.high_water - 1500; if (adapter->fc) /* locally set flow control value? 
*/ hw->fc.requested_mode = adapter->fc; else hw->fc.requested_mode = e1000_fc_full; if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; else hw->fc.pause_time = EM_FC_PAUSE_TIME; hw->fc.send_xon = TRUE; /* Device specific overrides/settings */ switch (hw->mac.type) { case e1000_pchlan: /* Workaround: no TX flow ctrl for PCH */ hw->fc.requested_mode = e1000_fc_rx_pause; hw->fc.pause_time = 0xFFFF; /* override */ if (if_getmtu(ifp) > ETHERMTU) { hw->fc.high_water = 0x3500; hw->fc.low_water = 0x1500; } else { hw->fc.high_water = 0x5000; hw->fc.low_water = 0x3000; } hw->fc.refresh_time = 0x1000; break; case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: hw->fc.high_water = 0x5C20; hw->fc.low_water = 0x5048; hw->fc.pause_time = 0x0650; hw->fc.refresh_time = 0x0400; /* Jumbos need adjusted PBA */ if (if_getmtu(ifp) > ETHERMTU) E1000_WRITE_REG(hw, E1000_PBA, 12); else E1000_WRITE_REG(hw, E1000_PBA, 26); break; case e1000_ich9lan: case e1000_ich10lan: if (if_getmtu(ifp) > ETHERMTU) { hw->fc.high_water = 0x2800; hw->fc.low_water = hw->fc.high_water - 8; break; } /* else fall thru */ default: if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; break; } /* I219 needs some special flushing to avoid hangs */ if (hw->mac.type == e1000_pch_spt) em_flush_desc_rings(adapter); /* Issue a global reset */ e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); em_disable_aspm(adapter); /* and a re-init */ if (e1000_init_hw(hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return; } E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); return; } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int em_setup_interface(device_t dev, struct adapter *adapter) { if_t ifp; INIT_DEBUGOUT("em_setup_interface: begin"); ifp = adapter->ifp = if_gethandle(IFT_ETHER); if (ifp == 0) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setdev(ifp, dev); if_setinitfn(ifp, em_init); if_setsoftc(ifp, adapter); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, em_ioctl); if_setgetcounterfn(ifp, em_get_counter); + /* TSO parameters */ ifp->if_hw_tsomax = IP_MAXPACKET; - ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER; + /* Take m_pullup(9)'s in em_xmit() w/ TSO into acount. */ + ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER - 5; ifp->if_hw_tsomaxsegsize = EM_TSO_SEG_SIZE; #ifdef EM_MULTIQUEUE /* Multiqueue stack interface */ if_settransmitfn(ifp, em_mq_start); if_setqflushfn(ifp, em_qflush); #else if_setstartfn(ifp, em_start); if_setsendqlen(ifp, adapter->num_tx_desc - 1); if_setsendqready(ifp); #endif ether_ifattach(ifp, adapter->hw.mac.addr); if_setcapabilities(ifp, 0); if_setcapenable(ifp, 0); if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4, 0); /* * Tell the upper layer(s) we * support full VLAN capability */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU, 0); if_setcapenable(ifp, if_getcapabilities(ifp)); /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. 
If ** using vlans directly on the em driver you can ** enable this and get full hardware tag filtering. */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER,0); #ifdef DEVICE_POLLING if_setcapabilitiesbit(ifp, IFCAP_POLLING,0); #endif /* Enable only WOL MAGIC by default */ if (adapter->wol) { if_setcapabilitiesbit(ifp, IFCAP_WOL, 0); if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0); } /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, em_media_change, em_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { u_char fiber_type = IFM_1000_SX; /* default type */ ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* * Manage DMA'able memory. */ static void em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma, int mapflags) { int error; error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ EM_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); goto fail_2; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (error); } static void em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_paddr != 0) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); dma->dma_paddr = 0; } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_vaddr = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate 
memory for the transmit and receive rings, and then * the descriptors associated with each, called only once at attach. * **********************************************************************/ static int em_allocate_queues(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = NULL; struct rx_ring *rxr = NULL; int rsize, tsize, error = E1000_SUCCESS; int txconf = 0, rxconf = 0; /* Allocate the TX ring struct memory */ if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN); /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_queues; i++, txconf++) { /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); if (em_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; } txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; bzero((void *)txr->tx_base, tsize); if (em_allocate_transmit_buffers(txr)) { device_printf(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; } #if __FreeBSD_version >= 800000 /* Allocate a buf ring */ txr->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &txr->tx_mtx); #endif } /* * Next the RX queues... */ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN); for (int i = 0; i < adapter->num_queues; i++, rxconf++) { rxr = &adapter->rx_rings[i]; rxr->adapter = adapter; rxr->me = i; /* Initialize the RX lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); if (em_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate RxDescriptor memory\n"); error = ENOMEM; goto err_rx_desc; } rxr->rx_base = (union e1000_rx_desc_extended *)rxr->rxdma.dma_vaddr; bzero((void *)rxr->rx_base, rsize); /* Allocate receive buffers for the ring*/ if (em_allocate_receive_buffers(rxr)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); error = ENOMEM; goto err_rx_desc; } } return (0); err_rx_desc: for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) em_dma_free(adapter, &rxr->rxdma); err_tx_desc: for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) em_dma_free(adapter, &txr->txdma); free(adapter->rx_rings, M_DEVBUF); rx_fail: #if __FreeBSD_version >= 800000 buf_ring_free(txr->br, M_DEVBUF); #endif free(adapter->tx_rings, M_DEVBUF); fail: return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. 
This is * called only once at attach, setup is done every reset. * **********************************************************************/ static int em_allocate_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; device_t dev = adapter->dev; struct em_txbuffer *txbuf; int error, i; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ EM_TSO_SIZE, /* maxsize */ EM_MAX_SCATTER, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->txtag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); goto fail; } if (!(txr->tx_buffers = (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } return 0; fail: /* We free all, it handles case where we are in the middle */ em_free_transmit_structures(adapter); return (error); } /********************************************************************* * * Initialize a transmit ring. * **********************************************************************/ static void em_setup_transmit_ring(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct em_txbuffer *txbuf; int i; #ifdef DEV_NETMAP struct netmap_slot *slot; struct netmap_adapter *na = netmap_getna(adapter->ifp); #endif /* DEV_NETMAP */ /* Clear the old descriptor contents */ EM_TX_LOCK(txr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_TX, txr->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ txr->next_avail_desc = 0; txr->next_to_clean = 0; /* Free any existing tx buffers. */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); uint64_t paddr; void *addr; addr = PNMB(na, slot + si, &paddr); txr->tx_base[i].buffer_addr = htole64(paddr); /* reload the map for netmap mode */ netmap_load_map(na, txr->txtag, txbuf->map, addr); } #endif /* DEV_NETMAP */ /* clear the watch index */ txbuf->next_eop = -1; } /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; txr->busy = EM_TX_IDLE; /* Clear checksum offload context. */ txr->last_hw_offload = 0; txr->last_hw_ipcss = 0; txr->last_hw_ipcso = 0; txr->last_hw_tucss = 0; txr->last_hw_tucso = 0; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); EM_TX_UNLOCK(txr); } /********************************************************************* * * Initialize all transmit rings. 
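 *
 * (A note on em_initialize_transmit_unit() below: the per queue
 * TXDCTL value is deliberately assembled from shifted OR terms,
 * since the register packs the prefetch, host and write-back
 * thresholds into separate fields alongside a granularity select
 * and a low watermark in the upper bits; building it piecewise
 * keeps each field visible rather than hiding them in one magic
 * constant.)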
* **********************************************************************/ static void em_setup_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) em_setup_transmit_ring(txr); return; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void em_initialize_transmit_unit(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl = 0, tarc, tipg = 0; INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 bus_addr = txr->txdma.dma_paddr; /* Base and Len of TX Ring */ E1000_WRITE_REG(hw, E1000_TDLEN(i), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (u32)bus_addr); /* Init the HEAD/TAIL indices */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)), E1000_READ_REG(&adapter->hw, E1000_TDLEN(i))); txr->busy = EM_TX_IDLE; txdctl = 0; /* clear txdctl */ txdctl |= 0x1f; /* PTHRESH */ txdctl |= 1 << 8; /* HTHRESH */ txdctl |= 1 << 16;/* WTHRESH */ txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ txdctl |= E1000_TXDCTL_GRAN; txdctl |= 1 << 25; /* LWTHRESH */ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); } /* Set the default values for the Tx Inter Packet Gap timer */ switch (adapter->hw.mac.type) { case e1000_80003es2lan: tipg = DEFAULT_82543_TIPG_IPGR1; tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else tipg = DEFAULT_82543_TIPG_IPGT_COPPER; tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value); if(adapter->hw.mac.type >= e1000_82540) E1000_WRITE_REG(&adapter->hw, E1000_TADV, adapter->tx_abs_int_delay.value); if ((adapter->hw.mac.type == e1000_82571) || (adapter->hw.mac.type == e1000_82572)) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= TARC_SPEED_MODE_BIT; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } else if (adapter->hw.mac.type == e1000_80003es2lan) { /* errata: program both queues to unweighted RR */ tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } else if (adapter->hw.mac.type == e1000_82574) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= TARC_ERRATA_BIT; if ( adapter->num_queues > 1) { tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX); E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } else E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } adapter->txd_cmd = E1000_TXD_CMD_IFCS; if (adapter->tx_int_delay.value > 0) adapter->txd_cmd |= E1000_TXD_CMD_IDE; /* Program the Transmit Control Register */ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | 
E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); if (adapter->hw.mac.type >= e1000_82571) tctl |= E1000_TCTL_MULR; /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); if (hw->mac.type == e1000_pch_spt) { u32 reg; reg = E1000_READ_REG(hw, E1000_IOSFPC); reg |= E1000_RCTL_RDMTS_HEX; E1000_WRITE_REG(hw, E1000_IOSFPC, reg); reg = E1000_READ_REG(hw, E1000_TARC(0)); reg |= E1000_TARC0_CB_MULTIQ_3_REQ; E1000_WRITE_REG(hw, E1000_TARC(0), reg); } } /********************************************************************* * * Free all transmit rings. * **********************************************************************/ static void em_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); em_free_transmit_buffers(txr); em_dma_free(adapter, &txr->txdma); EM_TX_UNLOCK(txr); EM_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ static void em_free_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct em_txbuffer *txbuf; INIT_DEBUGOUT("free_transmit_ring: begin"); if (txr->tx_buffers == NULL) return; for (int i = 0; i < adapter->num_tx_desc; i++) { txbuf = &txr->tx_buffers[i]; if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; if (txbuf->map != NULL) { bus_dmamap_destroy(txr->txtag, txbuf->map); txbuf->map = NULL; } } else if (txbuf->map != NULL) { bus_dmamap_unload(txr->txtag, txbuf->map); bus_dmamap_destroy(txr->txtag, txbuf->map); txbuf->map = NULL; } } #if __FreeBSD_version >= 800000 if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif if (txr->tx_buffers != NULL) { free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } if (txr->txtag != NULL) { bus_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; } /********************************************************************* * The offload context is protocol specific (TCP/UDP) and thus * only needs to be set when the protocol changes. The occasion * of a context change can be a performance detriment, and * might be better just disabled. The reason arises in the way * in which the controller supports pipelined requests from the * Tx data DMA. Up to four requests can be pipelined, and they may * belong to the same packet or to multiple packets. However all * requests for one packet are issued before a request is issued * for a subsequent packet and if a request for the next packet * requires a context change, that request will be stalled * until the previous request completes. This means setting up * a new context effectively disables pipelined Tx data DMA which * in turn greatly slow down performance to send small sized * frames. 
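 *
 * To limit that cost, em_transmit_checksum_setup() below caches the
 * last offload configuration (last_hw_offload together with the
 * ipcss/ipcso/tucss/tucso offsets) and skips emitting a new context
 * descriptor when nothing has changed.  The exception is the 82574
 * with more than one queue, where the shared context forces a fresh
 * descriptor for every packet.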
**********************************************************************/ static void em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off, struct ip *ip, u32 *txd_upper, u32 *txd_lower) { struct adapter *adapter = txr->adapter; struct e1000_context_desc *TXD = NULL; struct em_txbuffer *tx_buffer; int cur, hdr_len; u32 cmd = 0; u16 offload = 0; u8 ipcso, ipcss, tucso, tucss; ipcss = ipcso = tucss = tucso = 0; hdr_len = ip_off + (ip->ip_hl << 2); cur = txr->next_avail_desc; /* Setup of IP header checksum. */ if (mp->m_pkthdr.csum_flags & CSUM_IP) { *txd_upper |= E1000_TXD_POPTS_IXSM << 8; offload |= CSUM_IP; ipcss = ip_off; ipcso = ip_off + offsetof(struct ip, ip_sum); /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->lower_setup.ip_fields.ipcss = ipcss; TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len); TXD->lower_setup.ip_fields.ipcso = ipcso; cmd |= E1000_TXD_CMD_IP; } if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; offload |= CSUM_TCP; tucss = hdr_len; tucso = hdr_len + offsetof(struct tcphdr, th_sum); /* * The 82574L can only remember the *last* context used * regardless of queue that it was use for. We cannot reuse * contexts on this hardware platform and must generate a new * context every time. 82574L hardware spec, section 7.2.6, * second note. */ if (adapter->num_queues < 2) { /* * Setting up new checksum offload context for every * frames takes a lot of processing time for hardware. * This also reduces performance a lot for small sized * frames so avoid it if driver can use previously * configured checksum offload context. */ if (txr->last_hw_offload == offload) { if (offload & CSUM_IP) { if (txr->last_hw_ipcss == ipcss && txr->last_hw_ipcso == ipcso && txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } else { if (txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } } txr->last_hw_offload = offload; txr->last_hw_tucss = tucss; txr->last_hw_tucso = tucso; } /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->upper_setup.tcp_fields.tucss = hdr_len; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; cmd |= E1000_TXD_CMD_TCP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; tucss = hdr_len; tucso = hdr_len + offsetof(struct udphdr, uh_sum); /* * The 82574L can only remember the *last* context used * regardless of queue that it was use for. We cannot reuse * contexts on this hardware platform and must generate a new * context every time. 82574L hardware spec, section 7.2.6, * second note. */ if (adapter->num_queues < 2) { /* * Setting up new checksum offload context for every * frames takes a lot of processing time for hardware. * This also reduces performance a lot for small sized * frames so avoid it if driver can use previously * configured checksum offload context. 
*/ if (txr->last_hw_offload == offload) { if (offload & CSUM_IP) { if (txr->last_hw_ipcss == ipcss && txr->last_hw_ipcso == ipcso && txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } else { if (txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } } txr->last_hw_offload = offload; txr->last_hw_tucss = tucss; txr->last_hw_tucso = tucso; } /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->upper_setup.tcp_fields.tucss = tucss; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; } if (offload & CSUM_IP) { txr->last_hw_ipcss = ipcss; txr->last_hw_ipcso = ipcso; } TXD->tcp_seg_setup.data = htole32(0); TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd); tx_buffer = &txr->tx_buffers[cur]; tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++cur == adapter->num_tx_desc) cur = 0; txr->tx_avail--; txr->next_avail_desc = cur; } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) * **********************************************************************/ static void em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off, struct ip *ip, struct tcphdr *tp, u32 *txd_upper, u32 *txd_lower) { struct adapter *adapter = txr->adapter; struct e1000_context_desc *TXD; struct em_txbuffer *tx_buffer; int cur, hdr_len; /* * In theory we can use the same TSO context if and only if * frame is the same type(IP/TCP) and the same MSS. However * checking whether a frame has the same IP/TCP structure is * hard thing so just ignore that and always restablish a * new TSO context. */ hdr_len = ip_off + (ip->ip_hl << 2) + (tp->th_off << 2); *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */ E1000_TXD_DTYP_D | /* Data descr type */ E1000_TXD_CMD_TSE); /* Do TSE on this packet */ /* IP and/or TCP header checksum calculation and insertion. */ *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; cur = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[cur]; TXD = (struct e1000_context_desc *) &txr->tx_base[cur]; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place put the checksum. */ TXD->lower_setup.ip_fields.ipcss = ip_off; TXD->lower_setup.ip_fields.ipcse = htole16(ip_off + (ip->ip_hl << 2) - 1); TXD->lower_setup.ip_fields.ipcso = ip_off + offsetof(struct ip, ip_sum); /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD->upper_setup.tcp_fields.tucss = ip_off + (ip->ip_hl << 2); TXD->upper_setup.tcp_fields.tucse = 0; TXD->upper_setup.tcp_fields.tucso = ip_off + (ip->ip_hl << 2) + offsetof(struct tcphdr, th_sum); /* * Payload size per packet w/o any headers. * Length of all headers up to payload. 
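 *
 * As an illustrative example (plain Ethernet, no IP or TCP
 * options): ip_off = 14, ip_hl = 5 and th_off = 5 give
 * hdr_len = 14 + 20 + 20 = 54, and a 1500 byte MTU typically
 * yields an MSS (tso_segsz) of 1460, so each segment the hardware
 * emits carries 1460 bytes of payload behind a 54 byte header.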
*/ TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz); TXD->tcp_seg_setup.fields.hdr_len = hdr_len; TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | /* Extended descr */ E1000_TXD_CMD_TSE | /* TSE context */ E1000_TXD_CMD_IP | /* Do IP csum */ E1000_TXD_CMD_TCP | /* Do TCP checksum */ (mp->m_pkthdr.len - (hdr_len))); /* Total len */ tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++cur == adapter->num_tx_desc) cur = 0; txr->tx_avail--; txr->next_avail_desc = cur; txr->tx_tso = TRUE; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void em_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; int first, last, done, processed; struct em_txbuffer *tx_buffer; struct e1000_tx_desc *tx_desc, *eop_desc; if_t ifp = adapter->ifp; EM_TX_LOCK_ASSERT(txr); #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, txr->me)) return; #endif /* DEV_NETMAP */ /* No work, make sure hang detection is disabled */ if (txr->tx_avail == adapter->num_tx_desc) { txr->busy = EM_TX_IDLE; return; } processed = 0; first = txr->next_to_clean; tx_desc = &txr->tx_base[first]; tx_buffer = &txr->tx_buffers[first]; last = tx_buffer->next_eop; eop_desc = &txr->tx_base[last]; /* * What this does is get the index of the * first descriptor AFTER the EOP of the * first packet, that way we can do the * simple comparison on the inner while loop. */ if (++last == adapter->num_tx_desc) last = 0; done = last; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { /* We clean the range of the packet */ while (first != done) { tx_desc->upper.data = 0; tx_desc->lower.data = 0; tx_desc->buffer_addr = 0; ++txr->tx_avail; ++processed; if (tx_buffer->m_head) { bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } tx_buffer->next_eop = -1; if (++first == adapter->num_tx_desc) first = 0; tx_buffer = &txr->tx_buffers[first]; tx_desc = &txr->tx_base[first]; } if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* See if we can continue to the next packet */ last = tx_buffer->next_eop; if (last != -1) { eop_desc = &txr->tx_base[last]; /* Get new done point */ if (++last == adapter->num_tx_desc) last = 0; done = last; } else break; } bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* ** Hang detection: we know there's work outstanding ** or the entry return would have been taken, so no ** descriptor processed here indicates a potential hang. ** The local timer will examine this and do a reset if needed. */ if (processed == 0) { if (txr->busy != EM_TX_HUNG) ++txr->busy; } else /* At least one descriptor was cleaned */ txr->busy = EM_TX_BUSY; /* note this clears HUNG */ /* * If we have a minimum free, clear IFF_DRV_OACTIVE * to tell the stack that it is OK to send packets. * Notice that all writes of OACTIVE happen under the * TX lock which, with a single queue, guarantees * sanity. 
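 *
 * Using EM_MAX_SCATTER as the threshold below means OACTIVE is only
 * cleared once enough descriptors are free to hold a maximally
 * fragmented packet, so the next transmit attempt cannot fail
 * immediately for lack of descriptors.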
*/ if (txr->tx_avail >= EM_MAX_SCATTER) { if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } /* Disable hang detection if all clean */ if (txr->tx_avail == adapter->num_tx_desc) txr->busy = EM_TX_IDLE; } /********************************************************************* * * Refresh RX descriptor mbufs from system mbuf buffer pool. * **********************************************************************/ static void em_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; struct mbuf *m; bus_dma_segment_t segs; struct em_rxbuffer *rxbuf; int i, j, error, nsegs; bool cleaned = FALSE; i = j = rxr->next_to_refresh; /* ** Get one descriptor beyond ** our work mark to control ** the loop. */ if (++j == adapter->num_rx_desc) j = 0; while (j != limit) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head == NULL) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); /* ** If we have a temporary resource shortage ** that causes a failure, just abort refresh ** for now, we will return to this point when ** reinvoked from em_rxeof. */ if (m == NULL) goto update; } else m = rxbuf->m_head; m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz; m->m_flags |= M_PKTHDR; m->m_data = m->m_ext.ext_buf; /* Use bus_dma machinery to setup the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map, m, &segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(m); rxbuf->m_head = NULL; goto update; } rxbuf->m_head = m; rxbuf->paddr = segs.ds_addr; bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); em_setup_rxdesc(&rxr->rx_base[i], rxbuf); cleaned = TRUE; i = j; /* Next is precalulated for us */ rxr->next_to_refresh = i; /* Calculate next controlling index */ if (++j == adapter->num_rx_desc) j = 0; } update: /* ** Update the tail pointer only if, ** and as far as we have refreshed. */ if (cleaned) E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->next_to_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
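 *
 * Because of that one-to-one mapping, the DMA tag created below
 * needs only a single segment per map, sized for the largest
 * receive buffer this driver uses (a 9k jumbo cluster).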
* **********************************************************************/ static int em_allocate_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; device_t dev = adapter->dev; struct em_rxbuffer *rxbuf; int error; rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); if (rxr->rx_buffers == NULL) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); return (ENOMEM); } error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &rxr->rxtag); if (error) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, error); goto fail; } rxbuf = rxr->rx_buffers; for (int i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { rxbuf = &rxr->rx_buffers[i]; error = bus_dmamap_create(rxr->rxtag, 0, &rxbuf->map); if (error) { device_printf(dev, "%s: bus_dmamap_create failed: %d\n", __func__, error); goto fail; } } return (0); fail: em_free_receive_structures(adapter); return (error); } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int em_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct em_rxbuffer *rxbuf; bus_dma_segment_t seg[1]; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_slot *slot; struct netmap_adapter *na = netmap_getna(adapter->ifp); #endif /* Clear the ring contents */ EM_RX_LOCK(rxr); rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_RX, rxr->me, 0); #endif /* ** Free current RX buffer structs and their mbufs */ for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); m_freem(rxbuf->m_head); rxbuf->m_head = NULL; /* mark as freed */ } } /* Now replenish the mbufs */ for (int j = 0; j != adapter->num_rx_desc; ++j) { rxbuf = &rxr->rx_buffers[j]; #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->rx_rings[rxr->me], j); uint64_t paddr; void *addr; addr = PNMB(na, slot + si, &paddr); netmap_load_map(na, rxr->rxtag, rxbuf->map, addr); em_setup_rxdesc(&rxr->rx_base[j], rxbuf); continue; } #endif /* DEV_NETMAP */ rxbuf->m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (rxbuf->m_head == NULL) { error = ENOBUFS; goto fail; } rxbuf->m_head->m_len = adapter->rx_mbuf_sz; rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */ rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map, rxbuf->m_head, seg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(rxbuf->m_head); rxbuf->m_head = NULL; goto fail; } bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); rxbuf->paddr = seg[0].ds_addr; em_setup_rxdesc(&rxr->rx_base[j], rxbuf); } rxr->next_to_check = 0; rxr->next_to_refresh = 0; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fail: EM_RX_UNLOCK(rxr); return (error); } 
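/*
** A note on the extended RX descriptor handling above: the union
** e1000_rx_desc_extended overlays a "read" layout, which the driver
** fills with the buffer physical address when arming a slot (see
** em_setup_rxdesc()), and a "wb" write-back layout, which the
** hardware fills in with status, length and VLAN data on reception.
** Clearing the status field whenever a slot is re-armed is what
** keeps a stale DD bit from being mistaken for a completed
** descriptor in em_rxeof().
*/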
/********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int em_setup_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; int q; for (q = 0; q < adapter->num_queues; q++, rxr++) if (em_setup_receive_ring(rxr)) goto fail; return (0); fail: /* * Free RX buffers allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'q' failed, so its the terminus. */ for (int i = 0; i < q; ++i) { rxr = &adapter->rx_rings[i]; for (int n = 0; n < adapter->num_rx_desc; n++) { struct em_rxbuffer *rxbuf; rxbuf = &rxr->rx_buffers[n]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); m_freem(rxbuf->m_head); rxbuf->m_head = NULL; } } rxr->next_to_check = 0; rxr->next_to_refresh = 0; } return (ENOBUFS); } /********************************************************************* * * Free all receive rings. * **********************************************************************/ static void em_free_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { em_free_receive_buffers(rxr); /* Free the ring memory as well */ em_dma_free(adapter, &rxr->rxdma); EM_RX_LOCK_DESTROY(rxr); } free(adapter->rx_rings, M_DEVBUF); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void em_free_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct em_rxbuffer *rxbuf = NULL; INIT_DEBUGOUT("free_receive_buffers: begin"); if (rxr->rx_buffers != NULL) { for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->map != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); bus_dmamap_destroy(rxr->rxtag, rxbuf->map); } if (rxbuf->m_head != NULL) { m_freem(rxbuf->m_head); rxbuf->m_head = NULL; } } free(rxr->rx_buffers, M_DEVBUF); rxr->rx_buffers = NULL; rxr->next_to_check = 0; rxr->next_to_refresh = 0; } if (rxr->rxtag != NULL) { bus_dma_tag_destroy(rxr->rxtag); rxr->rxtag = NULL; } return; } /********************************************************************* * * Enable receive unit. 
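 *
 * One detail worth noting: the interrupt throttle value written
 * below follows DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns), so
 * assuming the customary MAX_INTS_PER_SEC of 8000 the programmed
 * value is roughly 10^9 / (8000 * 256) = 488 (in 256ns units),
 * i.e. at most about 8000 receive interrupts per second.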
* **********************************************************************/ static void em_initialize_receive_unit(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; if_t ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 rctl, rxcsum, rfctl; INIT_DEBUGOUT("em_initialize_receive_units: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); /* Do not disable if ever enabled on this hardware */ if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Long Packet receive */ if (if_getmtu(ifp) > ETHERMTU) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; /* Strip the CRC */ if (!em_disable_crc_stripping) rctl |= E1000_RCTL_SECRC; E1000_WRITE_REG(&adapter->hw, E1000_RADV, adapter->rx_abs_int_delay.value); E1000_WRITE_REG(&adapter->hw, E1000_RDTR, adapter->rx_int_delay.value); /* * Set the interrupt throttling rate. Value is calculated * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR); /* Use extended rx descriptor formats */ rfctl = E1000_READ_REG(hw, E1000_RFCTL); rfctl |= E1000_RFCTL_EXTEN; /* ** When using MSIX interrupts we need to throttle ** using the EITR register (82574 only) */ if (hw->mac.type == e1000_82574) { for (int i = 0; i < 4; i++) E1000_WRITE_REG(hw, E1000_EITR_82574(i), DEFAULT_ITR); /* Disable accelerated acknowledge */ rfctl |= E1000_RFCTL_ACK_DIS; } E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { #ifdef EM_MULTIQUEUE rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL | E1000_RXCSUM_PCSD; #else rxcsum |= E1000_RXCSUM_TUOFL; #endif } else rxcsum &= ~E1000_RXCSUM_TUOFL; E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); #ifdef EM_MULTIQUEUE #define RSSKEYLEN 10 if (adapter->num_queues > 1) { uint8_t rss_key[4 * RSSKEYLEN]; uint32_t reta = 0; int i; /* * Configure RSS key */ arc4rand(rss_key, sizeof(rss_key), 0); for (i = 0; i < RSSKEYLEN; ++i) { uint32_t rssrk = 0; rssrk = EM_RSSRK_VAL(rss_key, i); E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk); } /* * Configure RSS redirect table in following fashion: * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] */ for (i = 0; i < sizeof(reta); ++i) { uint32_t q; q = (i % adapter->num_queues) << 7; reta |= q << (8 * i); } for (i = 0; i < 32; ++i) { E1000_WRITE_REG(hw, E1000_RETA(i), reta); } E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q | E1000_MRQC_RSS_FIELD_IPV4_TCP | E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX | E1000_MRQC_RSS_FIELD_IPV6_EX | E1000_MRQC_RSS_FIELD_IPV6); } #endif /* ** XXX TEMPORARY WORKAROUND: on some systems with 82573 ** long latencies are observed, like Lenovo X60. This ** change eliminates the problem, but since having positive ** values in RDTR is a known source of problems on other ** platforms another solution is being sought. 
*/ if (hw->mac.type == e1000_82573) E1000_WRITE_REG(hw, E1000_RDTR, 0x20); for (int i = 0; i < adapter->num_queues; i++, rxr++) { /* Setup the Base and Length of the Rx Descriptor Ring */ u64 bus_addr = rxr->rxdma.dma_paddr; u32 rdt = adapter->num_rx_desc - 1; /* default */ E1000_WRITE_REG(hw, E1000_RDLEN(i), adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr); /* Setup the Head and Tail Descriptor Pointers */ E1000_WRITE_REG(hw, E1000_RDH(i), 0); #ifdef DEV_NETMAP /* * an init() while a netmap client is active must * preserve the rx buffers passed to userspace. */ if (if_getcapenable(ifp) & IFCAP_NETMAP) { struct netmap_adapter *na = netmap_getna(adapter->ifp); rdt -= nm_kr_rxspace(&na->rx_rings[i]); } #endif /* DEV_NETMAP */ E1000_WRITE_REG(hw, E1000_RDT(i), rdt); } /* * Set PTHRESH for improved jumbo performance * According to 10.2.5.11 of Intel 82574 Datasheet, * RXDCTL(1) is written whenever RXDCTL(0) is written. * Only write to RXDCTL(1) if there is a need for different * settings. */ if (((adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_pch2lan) || (adapter->hw.mac.type == e1000_ich10lan)) && (if_getmtu(ifp) > ETHERMTU)) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); } else if (adapter->hw.mac.type == e1000_82574) { for (int i = 0; i < adapter->num_queues; i++) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= 0x20; /* PTHRESH */ rxdctl |= 4 << 8; /* HTHRESH */ rxdctl |= 4 << 16;/* WTHRESH */ rxdctl |= 1 << 24; /* Switch to granularity */ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } } if (adapter->hw.mac.type >= e1000_pch2lan) { if (if_getmtu(ifp) > ETHERMTU) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; if (adapter->rx_mbuf_sz == MCLBYTES) rctl |= E1000_RCTL_SZ_2048; else if (adapter->rx_mbuf_sz == MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; /* ensure we clear use DTYPE of 00 here */ rctl &= ~0x00000C00; /* Write out the settings */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. 
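 *
 * A frame larger than one receive buffer arrives as several
 * descriptors: only the last carries EOP, and the earlier
 * fragments are chained onto the packet through the fmp/lmp
 * pointers until the completed chain is handed to if_input().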
* * For polling we also now return the number of cleaned packets *********************************************************************/ static bool em_rxeof(struct rx_ring *rxr, int count, int *done) { struct adapter *adapter = rxr->adapter; if_t ifp = adapter->ifp; struct mbuf *mp, *sendmp; u32 status = 0; u16 len; int i, processed, rxdone = 0; bool eop; union e1000_rx_desc_extended *cur; EM_RX_LOCK(rxr); /* Sync the ring */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, rxr->me, &processed)) { EM_RX_UNLOCK(rxr); return (FALSE); } #endif /* DEV_NETMAP */ for (i = rxr->next_to_check, processed = 0; count != 0;) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) break; cur = &rxr->rx_base[i]; status = le32toh(cur->wb.upper.status_error); mp = sendmp = NULL; if ((status & E1000_RXD_STAT_DD) == 0) break; len = le16toh(cur->wb.upper.length); eop = (status & E1000_RXD_STAT_EOP) != 0; if ((status & E1000_RXDEXT_ERR_FRAME_ERR_MASK) || (rxr->discard == TRUE)) { adapter->dropped_pkts++; ++rxr->rx_discarded; if (!eop) /* Catch subsequent segs */ rxr->discard = TRUE; else rxr->discard = FALSE; em_rx_discard(rxr, i); goto next_desc; } bus_dmamap_unload(rxr->rxtag, rxr->rx_buffers[i].map); /* Assign correct length to the current fragment */ mp = rxr->rx_buffers[i].m_head; mp->m_len = len; /* Trigger for refresh */ rxr->rx_buffers[i].m_head = NULL; /* First segment? */ if (rxr->fmp == NULL) { mp->m_pkthdr.len = len; rxr->fmp = rxr->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; rxr->lmp->m_next = mp; rxr->lmp = mp; rxr->fmp->m_pkthdr.len += len; } if (eop) { --count; sendmp = rxr->fmp; if_setrcvif(sendmp, ifp); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); em_receive_checksum(status, sendmp); #ifndef __NO_STRICT_ALIGNMENT if (adapter->hw.mac.max_frame_size > (MCLBYTES - ETHER_ALIGN) && em_fixup_rx(rxr) != 0) goto skip; #endif if (status & E1000_RXD_STAT_VP) { if_setvtag(sendmp, le16toh(cur->wb.upper.vlan)); sendmp->m_flags |= M_VLANTAG; } #ifndef __NO_STRICT_ALIGNMENT skip: #endif rxr->fmp = rxr->lmp = NULL; } next_desc: /* Sync the ring */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Zero out the receive descriptors status. */ cur->wb.upper.status_error &= htole32(~0xFF); ++rxdone; /* cumulative for POLL */ ++processed; /* Advance our pointers to the next descriptor. */ if (++i == adapter->num_rx_desc) i = 0; /* Send to the stack */ if (sendmp != NULL) { rxr->next_to_check = i; EM_RX_UNLOCK(rxr); if_input(ifp, sendmp); EM_RX_LOCK(rxr); i = rxr->next_to_check; } /* Only refresh mbufs every 8 descriptors */ if (processed == 8) { em_refresh_mbufs(rxr, i); processed = 0; } } /* Catch any remaining refresh work */ if (e1000_rx_unrefreshed(rxr)) em_refresh_mbufs(rxr, i); rxr->next_to_check = i; if (done != NULL) *done = rxdone; EM_RX_UNLOCK(rxr); return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE); } static __inline void em_rx_discard(struct rx_ring *rxr, int i) { struct em_rxbuffer *rbuf; rbuf = &rxr->rx_buffers[i]; bus_dmamap_unload(rxr->rxtag, rbuf->map); /* Free any previous pieces */ if (rxr->fmp != NULL) { rxr->fmp->m_flags |= M_PKTHDR; m_freem(rxr->fmp); rxr->fmp = NULL; rxr->lmp = NULL; } /* ** Free buffer and allow em_refresh_mbufs() ** to clean up and recharge buffer. 
*/ if (rbuf->m_head) { m_free(rbuf->m_head); rbuf->m_head = NULL; } return; } #ifndef __NO_STRICT_ALIGNMENT /* * When jumbo frames are enabled we should realign entire payload on * architecures with strict alignment. This is serious design mistake of 8254x * as it nullifies DMA operations. 8254x just allows RX buffer size to be * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its * payload. On architecures without strict alignment restrictions 8254x still * performs unaligned memory access which would reduce the performance too. * To avoid copying over an entire frame to align, we allocate a new mbuf and * copy ethernet header to the new mbuf. The new mbuf is prepended into the * existing mbuf chain. * * Be aware, best performance of the 8254x is achived only when jumbo frame is * not used at all on architectures with strict alignment. */ static int em_fixup_rx(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct mbuf *m, *n; int error; error = 0; m = rxr->fmp; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; } else { MGETHDR(n, M_NOWAIT, MT_DATA); if (n != NULL) { bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; rxr->fmp = n; } else { adapter->dropped_pkts++; m_freem(rxr->fmp); rxr->fmp = NULL; error = ENOMEM; } } return (error); } #endif static void em_setup_rxdesc(union e1000_rx_desc_extended *rxd, const struct em_rxbuffer *rxbuf) { rxd->read.buffer_addr = htole64(rxbuf->paddr); /* DD bits must be cleared */ rxd->wb.upper.status_error= 0; } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. 
* *********************************************************************/ static void em_receive_checksum(uint32_t status, struct mbuf *mp) { mp->m_pkthdr.csum_flags = 0; /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) return; /* If the IP checksum exists and there is no IP Checksum error */ if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == E1000_RXD_STAT_IPCS) { mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } /* TCP or UDP checksum */ if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == E1000_RXD_STAT_TCPCS) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } if (status & E1000_RXD_STAT_UDPCS) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } } /* * This routine is run via an vlan * config EVENT */ static void em_register_vlan(void *arg, if_t ifp, u16 vtag) { struct adapter *adapter = if_getsoftc(ifp); u32 index, bit; if ((void*)adapter != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */ return; EM_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } /* * This routine is run via an vlan * unconfig EVENT */ static void em_unregister_vlan(void *arg, if_t ifp, u16 vtag) { struct adapter *adapter = if_getsoftc(ifp); u32 index, bit; if (adapter != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; EM_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } static void em_setup_vlan_hw_support(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 reg; /* ** We get here thru init_locked, meaning ** a soft reset, this has already cleared ** the VFTA and other state, so if there ** have been no vlan's registered do nothing. */ if (adapter->num_vlans == 0) return; /* ** A soft reset zero's out the VFTA, so ** we need to repopulate it now. */ for (int i = 0; i < EM_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, adapter->shadow_vfta[i]); reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_VME; E1000_WRITE_REG(hw, E1000_CTRL, reg); /* Enable the Filter Table */ reg = E1000_READ_REG(hw, E1000_RCTL); reg &= ~E1000_RCTL_CFIEN; reg |= E1000_RCTL_VFE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } static void em_enable_intr(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ims_mask = IMS_ENABLE_MASK; if (hw->mac.type == e1000_82574) { E1000_WRITE_REG(hw, EM_EIAC, adapter->ims); ims_mask |= adapter->ims; } E1000_WRITE_REG(hw, E1000_IMS, ims_mask); } static void em_disable_intr(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if (hw->mac.type == e1000_82574) E1000_WRITE_REG(hw, EM_EIAC, 0); E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); } /* * Bit of a misnomer, what this really means is * to enable OS management of the system... 
aka * to disable special hardware management features */ static void em_init_manageability(struct adapter *adapter) { /* A shared code workaround */ #define E1000_82542_MANC2H E1000_MANC2H if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; #define E1000_MNG2HOST_PORT_623 (1 << 5) #define E1000_MNG2HOST_PORT_664 (1 << 6) manc2h |= E1000_MNG2HOST_PORT_623; manc2h |= E1000_MNG2HOST_PORT_664; E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one. */ static void em_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means * that the driver is loaded. For AMT version type f/w * this means that the network i/f is open. */ static void em_get_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); return; } /* * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is no longer loaded. For AMT versions of the * f/w this means that the network i/f is closed. */ static void em_release_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (!adapter->has_manage) return; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); return; } static int em_is_valid_ether_addr(u8 *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return (TRUE); } /* ** Parse the interface capabilities with regard ** to both system management and wake-on-lan for ** later use. 
*/ static void em_get_wakeup(device_t dev) { struct adapter *adapter = device_get_softc(dev); u16 eeprom_data = 0, device_id, apme_mask; adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); apme_mask = EM_EEPROM_APME; switch (adapter->hw.mac.type) { case e1000_82573: case e1000_82583: adapter->has_amt = TRUE; /* Falls thru */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: if (adapter->hw.bus.func == 1) { e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; } else e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: apme_mask = E1000_WUC_APME; adapter->has_amt = TRUE; eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC); break; default: e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; } if (eeprom_data & apme_mask) adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC); /* * We have the eeprom settings, now apply the special cases * where the eeprom may be wrong or the board won't support * wake on lan on a particular port */ device_id = pci_get_device(dev); switch (device_id) { case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->wol = 0; break; case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->wol = 0; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; } return; } /* * Enable PCI Wake On Lan capability */ static void em_enable_wakeup(device_t dev) { struct adapter *adapter = device_get_softc(dev); if_t ifp = adapter->ifp; u32 pmc, ctrl, ctrl_ext, rctl; u16 status; if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0)) return; /* Advertise the wakeup capability */ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3); E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); if ((adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_pchlan) || (adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_ich10lan)) e1000_suspend_workarounds_ich8lan(&adapter->hw); /* Keep the laser running on Fiber adapters */ if (adapter->hw.phy.media_type == e1000_media_type_fiber || adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext); } /* ** Determine type of Wakeup: note that wol ** is set with all bits on by default. 
*/ if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0) adapter->wol &= ~E1000_WUFC_MAG; if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0) adapter->wol &= ~E1000_WUFC_MC; else { rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); } if ((adapter->hw.mac.type == e1000_pchlan) || (adapter->hw.mac.type == e1000_pch2lan)) { if (em_enable_phy_wakeup(adapter)) return; } else { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); } if (adapter->hw.phy.type == e1000_phy_igp_3) e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); /* Request PME */ status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if (if_getcapenable(ifp) & IFCAP_WOL) status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); return; } /* ** WOL in the newer chipset interfaces (pchlan) ** require thing to be copied into the phy */ static int em_enable_phy_wakeup(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mreg, ret = 0; u16 preg; /* copy MAC RARs to PHY RARs */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* copy MAC MTA to PHY MTA */ for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) { mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF)); e1000_write_phy_reg(hw, BM_MTA(i) + 1, (u16)((mreg >> 16) & 0xFFFF)); } /* configure PHY Rx Control register */ e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg); mreg = E1000_READ_REG(hw, E1000_RCTL); if (mreg & E1000_RCTL_UPE) preg |= BM_RCTL_UPE; if (mreg & E1000_RCTL_MPE) preg |= BM_RCTL_MPE; preg &= ~(BM_RCTL_MO_MASK); if (mreg & E1000_RCTL_MO_3) preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) << BM_RCTL_MO_SHIFT); if (mreg & E1000_RCTL_BAM) preg |= BM_RCTL_BAM; if (mreg & E1000_RCTL_PMCF) preg |= BM_RCTL_PMCF; mreg = E1000_READ_REG(hw, E1000_CTRL); if (mreg & E1000_CTRL_RFCE) preg |= BM_RCTL_RFCE; e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg); /* enable PHY wakeup in MAC register */ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol); /* configure and enable PHY wakeup in PHY registers */ e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol); e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); /* activate PHY wakeup */ ret = hw->phy.ops.acquire(hw); if (ret) { printf("Could not acquire PHY\n"); return ret; } e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg); if (ret) { printf("Could not read PHY page 769\n"); goto out; } preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg); if (ret) printf("Could not set PHY Host Wakeup bit\n"); out: hw->phy.ops.release(hw); return ret; } static void em_led_func(void *arg, int onoff) { struct adapter *adapter = arg; EM_CORE_LOCK(adapter); if (onoff) { e1000_setup_led(&adapter->hw); e1000_led_on(&adapter->hw); } else { e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } EM_CORE_UNLOCK(adapter); } /* ** Disable the L0S and L1 LINK states */ static void em_disable_aspm(struct adapter *adapter) { int base, reg; u16 link_cap,link_ctrl; device_t dev = adapter->dev; switch (adapter->hw.mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: break; default: return; } if 
(pci_find_cap(dev, PCIY_EXPRESS, &base) != 0) return; reg = base + PCIER_LINK_CAP; link_cap = pci_read_config(dev, reg, 2); if ((link_cap & PCIEM_LINK_CAP_ASPM) == 0) return; reg = base + PCIER_LINK_CTL; link_ctrl = pci_read_config(dev, reg, 2); link_ctrl &= ~PCIEM_LINK_CTL_ASPMC; pci_write_config(dev, reg, link_ctrl, 2); return; } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void em_update_stats_counters(struct adapter *adapter) { if(adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS); adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); } adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. 
*/ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32); adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32); adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); /* Interrupt Counts */ adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC); adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC); adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC); adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC); adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC); adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC); adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC); adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC); adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC); if (adapter->hw.mac.type >= e1000_82543) { adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, E1000_RXERRC); adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, E1000_TNCRS); adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, E1000_CEXTERR); adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, E1000_TSCTC); adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, E1000_TSCTFC); } } static uint64_t em_get_counter(if_t ifp, ift_counter cnt) { struct adapter *adapter; adapter = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_COLLISIONS: return (adapter->stats.colc); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.cexterr); case IFCOUNTER_OERRORS: return (adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events); default: return (if_get_counter_default(ifp, cnt)); } } /* Export a single 32-bit register via a read-only sysctl. */ static int em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* * Add sysctl variables, one per statistic, to the system. 
*/ static void em_add_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = &adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail", CTLFLAG_RD, &adapter->mbuf_defrag_failed, "Defragmenting mbuf chain failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL, em_sysctl_reg_handler, "IU", "Device Control Register"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL, em_sysctl_reg_handler, "IU", "Receiver Control Register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "TX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", CTLFLAG_RD, &txr->tx_irq, "Queue MSI-X Transmit Interrupts"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue No Descriptor Available"); snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "RX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", CTLFLAG_RD, &rxr->rx_irq, "Queue MSI-X Receive Interrupts"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, 
OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &adapter->stats.symerrs, "Symbol Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &adapter->stats.sec, "Sequence Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &adapter->stats.dc, "Defer Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &adapter->stats.mpc, "Missed Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &adapter->stats.rnbc, "Receive No Buffers"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &adapter->stats.ruc, "Receive Undersize"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &adapter->stats.rfc, "Fragmented Packets Received "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &adapter->stats.roc, "Oversized Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &adapter->stats.rjc, "Recevied Jabber"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &adapter->stats.rxerrc, "Receive Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &adapter->stats.crcerrs, "CRC errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &adapter->stats.algnerrc, "Alignment Errors"); /* On 82575 these are collision counts */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", CTLFLAG_RD, &adapter->stats.cexterr, "Collision/Carrier extension errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &adapter->stats.xonrxc, "XON Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &adapter->stats.xontxc, "XON Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &adapter->stats.xoffrxc, "XOFF Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &adapter->stats.xofftxc, "XOFF Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &adapter->stats.tpr, "Total Packets Received "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &adapter->stats.gprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.bprc, "Broadcast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.mprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &adapter->stats.prc64, "64 byte frames received "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &adapter->stats.prc127, "65-127 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &adapter->stats.prc255, "128-255 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &adapter->stats.prc511, "256-511 byte frames 
received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &adapter->stats.prc1023, "512-1023 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.prc1522, "1023-1522 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &adapter->stats.gorc, "Good Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &adapter->stats.gotc, "Good Octets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &adapter->stats.tpt, "Total Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &adapter->stats.gptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &adapter->stats.bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &adapter->stats.mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &adapter->stats.ptc64, "64 byte frames transmitted "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &adapter->stats.ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &adapter->stats.ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &adapter->stats.ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &adapter->stats.ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &adapter->stats.tsctc, "TSO Contexts Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", CTLFLAG_RD, &adapter->stats.tsctfc, "TSO Contexts Failed"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &adapter->stats.iac, "Interrupt Assertion Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", CTLFLAG_RD, &adapter->stats.icrxptc, "Interrupt Cause Rx Pkt Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", CTLFLAG_RD, &adapter->stats.icrxatc, "Interrupt Cause Rx Abs Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", CTLFLAG_RD, &adapter->stats.ictxptc, "Interrupt Cause Tx Pkt Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", CTLFLAG_RD, &adapter->stats.ictxatc, "Interrupt Cause Tx Abs Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", CTLFLAG_RD, &adapter->stats.ictxqec, "Interrupt Cause Tx Queue Empty Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", CTLFLAG_RD, &adapter->stats.ictxqmtc, "Interrupt Cause Tx Queue Min Thresh Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &adapter->stats.icrxdmtc, "Interrupt Cause Rx Desc Min Thresh Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun", CTLFLAG_RD, &adapter->stats.icrxoc, "Interrupt Cause Receiver Overrun Count"); } 
/********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words, stuff that matters is in that extent. * **********************************************************************/ static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) em_print_nvm_info(adapter); return (error); } static void em_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* Its a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { struct em_int_delay_info *info; struct adapter *adapter; u32 regval; int error, usecs, ticks; info = (struct em_int_delay_info *)arg1; usecs = info->value; error = sysctl_handle_int(oidp, &usecs, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535)) return (EINVAL); info->value = usecs; ticks = EM_USECS_TO_TICKS(usecs); if (info->offset == E1000_ITR) /* units are 256ns here */ ticks *= 4; adapter = info->adapter; EM_CORE_LOCK(adapter); regval = E1000_READ_OFFSET(&adapter->hw, info->offset); regval = (regval & ~0xffff) | (ticks & 0xffff); /* Handle a few special cases. */ switch (info->offset) { case E1000_RDTR: break; case E1000_TIDV: if (ticks == 0) { adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; /* Don't write 0 into the TIDV register. */ regval++; } else adapter->txd_cmd |= E1000_TXD_CMD_IDE; break; } E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); EM_CORE_UNLOCK(adapter); return (0); } static void em_add_int_delay_sysctl(struct adapter *adapter, const char *name, const char *description, struct em_int_delay_info *info, int offset, int value) { info->adapter = adapter; info->offset = offset; info->value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, em_sysctl_int_delay, "I", description); } static void em_set_sysctl_value(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLFLAG_RW, limit, value, description); } /* ** Set flow control using sysctl: ** Flow control values: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int em_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct adapter *adapter = (struct adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (input == adapter->fc) /* no change? 
*/ return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); return (error); } /* ** Manage Energy Efficient Ethernet: ** Control values: ** 0/1 - enabled/disabled */ static int em_sysctl_eee(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error, value; value = adapter->hw.dev_spec.ich8lan.eee_disable; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); EM_CORE_LOCK(adapter); adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); return (0); } static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_debug_info(adapter); } return (error); } /* ** This routine is meant to be fluid, add whatever is ** needed for debugging a problem. -jfv */ static void em_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; if (if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) printf("Interface is RUNNING "); else printf("Interface is NOT RUNNING\n"); if (if_getdrvflags(adapter->ifp) & IFF_DRV_OACTIVE) printf("and INACTIVE\n"); else printf("and ACTIVE\n"); for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { device_printf(dev, "TX Queue %d ------\n", i); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_TDH(i)), E1000_READ_REG(&adapter->hw, E1000_TDT(i))); device_printf(dev, "Tx Queue Status = %d\n", txr->busy); device_printf(dev, "TX descriptors avail = %d\n", txr->tx_avail); device_printf(dev, "Tx Descriptors avail failure = %ld\n", txr->no_desc_avail); device_printf(dev, "RX Queue %d ------\n", i); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_RDH(i)), E1000_READ_REG(&adapter->hw, E1000_RDT(i))); device_printf(dev, "RX discarded packets = %ld\n", rxr->rx_discarded); device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check); device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh); } } #ifdef EM_MULTIQUEUE /* * 82574 only: * Write a new value to the EEPROM increasing the number of MSIX * vectors from 3 to 5, for proper multiqueue support. 
*/ static void em_enable_vectors_82574(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; device_t dev = adapter->dev; u16 edata; e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata); printf("Current cap: %#06x\n", edata); if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) { device_printf(dev, "Writing to eeprom: increasing " "reported MSIX vectors from 3 to 5...\n"); edata &= ~(EM_NVM_MSIX_N_MASK); edata |= 4 << EM_NVM_MSIX_N_SHIFT; e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata); e1000_update_nvm_checksum(hw); device_printf(dev, "Writing to eeprom: done\n"); } } #endif #ifdef DDB DB_COMMAND(em_reset_dev, em_ddb_reset_dev) { devclass_t dc; int max_em; dc = devclass_find("em"); max_em = devclass_get_maxunit(dc); for (int index = 0; index < (max_em - 1); index++) { device_t dev; dev = devclass_get_device(dc, index); if (device_get_driver(dev) == &em_driver) { struct adapter *adapter = device_get_softc(dev); EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } } } DB_COMMAND(em_dump_queue, em_ddb_dump_queue) { devclass_t dc; int max_em; dc = devclass_find("em"); max_em = devclass_get_maxunit(dc); for (int index = 0; index < (max_em - 1); index++) { device_t dev; dev = devclass_get_device(dc, index); if (device_get_driver(dev) == &em_driver) em_print_debug_info(device_get_softc(dev)); } } #endif Index: projects/release-pkg/sys/dev/e1000/if_em.h =================================================================== --- projects/release-pkg/sys/dev/e1000/if_em.h (revision 295925) +++ projects/release-pkg/sys/dev/e1000/if_em.h (revision 295926) @@ -1,558 +1,558 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _EM_H_DEFINED_ #define _EM_H_DEFINED_ /* Tunables */ /* * EM_TXD: Maximum number of Transmit Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define EM_MIN_TXD 80 #define EM_MAX_TXD 4096 #ifdef EM_MULTIQUEUE #define EM_DEFAULT_TXD 4096 #else #define EM_DEFAULT_TXD 1024 #endif /* * EM_RXD - Maximum number of receive Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define EM_MIN_RXD 80 #define EM_MAX_RXD 4096 #ifdef EM_MULTIQUEUE #define EM_DEFAULT_RXD 4096 #else #define EM_DEFAULT_RXD 1024 #endif /* * EM_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors. */ #define EM_TIDV 64 /* * EM_TADV - Transmit Absolute Interrupt Delay Value * (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if EM_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with EM_TIDV, may improve traffic throughput in specific * network conditions. */ #define EM_TADV 64 /* * EM_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting EM_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that EM_RDTR is set to 0. 
*/ #ifdef EM_MULTIQUEUE #define EM_RDTR 64 #else #define EM_RDTR 0 #endif /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if EM_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with EM_RDTR, may improve traffic throughput in specific network * conditions. */ #ifdef EM_MULTIQUEUE #define EM_RADV 128 #else #define EM_RADV 64 #endif /* * This parameter controls the max duration of transmit watchdog. */ #define EM_WATCHDOG (10 * hz) /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. */ #define EM_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) /* * This parameter controls whether or not autonegotation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter control whether or not the driver will wait for * autonegotiation to complete. * 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define AUTO_ALL_MODES 0 /* PHY master/slave setting */ #define EM_MASTER_SLAVE e1000_ms_hw_default /* * Micellaneous constants */ #define EM_VENDOR_ID 0x8086 #define EM_FLASH 0x0014 #define EM_JUMBO_PBA 0x00000028 #define EM_DEFAULT_PBA 0x00000030 #define EM_SMARTSPEED_DOWNSHIFT 3 #define EM_SMARTSPEED_MAX 15 #define EM_MAX_LOOP 10 #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 #define EM_FC_PAUSE_TIME 0x0680 #define EM_EEPROM_APME 0x400; #define EM_82544_APME 0x0004; /* * Driver state logic for the detection of a hung state * in hardware. Set TX_HUNG whenever a TX packet is used * (data is sent) and clear it when txeof() is invoked if * any descriptors from the ring are cleaned/reclaimed. * Increment internal counter if no descriptors are cleaned * and compare to TX_MAXTRIES. When counter > TX_MAXTRIES, * reset adapter. */ #define EM_TX_IDLE 0x00000000 #define EM_TX_BUSY 0x00000001 #define EM_TX_HUNG 0x80000000 #define EM_TX_MAXTRIES 10 #define PCICFG_DESC_RING_STATUS 0xe4 #define FLUSH_DESC_REQUIRED 0x100 /* * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will * also optimize cache line size effect. H/W supports up to cache line size 128. 
*/ #define EM_DBA_ALIGN 128 /* * See Intel 82574 Driver Programming Interface Manual, Section 10.2.6.9 */ #define TARC_COMPENSATION_MODE (1 << 7) /* Compensation Mode */ #define TARC_SPEED_MODE_BIT (1 << 21) /* On PCI-E MACs only */ #define TARC_MQ_FIX (1 << 23) | \ (1 << 24) | \ (1 << 25) /* Handle errata in MQ mode */ #define TARC_ERRATA_BIT (1 << 26) /* Note from errata on 82574 */ /* PCI Config defines */ #define EM_BAR_TYPE(v) ((v) & EM_BAR_TYPE_MASK) #define EM_BAR_TYPE_MASK 0x00000001 #define EM_BAR_TYPE_MMEM 0x00000000 #define EM_BAR_TYPE_FLASH 0x0014 #define EM_BAR_MEM_TYPE(v) ((v) & EM_BAR_MEM_TYPE_MASK) #define EM_BAR_MEM_TYPE_MASK 0x00000006 #define EM_BAR_MEM_TYPE_32BIT 0x00000000 #define EM_BAR_MEM_TYPE_64BIT 0x00000004 #define EM_MSIX_BAR 3 /* On 82575 */ /* More backward compatibility */ #if __FreeBSD_version < 900000 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #endif /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) -#define EM_MAX_SCATTER 64 +#define EM_MAX_SCATTER 40 #define EM_VFTA_SIZE 128 #define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header)) #define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define EM_MSIX_MASK 0x01F00000 /* For 82574 use */ #define EM_MSIX_LINK 0x01000000 /* For 82574 use */ #define ETH_ZLEN 60 #define ETH_ADDR_LEN 6 #define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */ /* * 82574 has a nonstandard address for EIAC * and since its only used in MSIX, and in * the em driver only 82574 uses MSIX we can * solve it just using this define. */ #define EM_EIAC 0x000DC /* * 82574 only reports 3 MSI-X vectors by default; * defines assisting with making it report 5 are * located here. */ #define EM_NVM_PCIE_CTRL 0x1B #define EM_NVM_MSIX_N_MASK (0x7 << EM_NVM_MSIX_N_SHIFT) #define EM_NVM_MSIX_N_SHIFT 7 /* * Bus dma allocation structure used by * e1000_dma_malloc and e1000_dma_free. 
*/ struct em_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; struct adapter; struct em_int_delay_info { struct adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* * The transmit ring, one per tx queue */ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; char mtx_name[16]; u32 me; u32 msix; u32 ims; int busy; struct em_dma_alloc txdma; struct e1000_tx_desc *tx_base; struct task tx_task; struct taskqueue *tq; u32 next_avail_desc; u32 next_to_clean; struct em_txbuffer *tx_buffers; volatile u16 tx_avail; u32 tx_tso; /* last tx was tso */ u16 last_hw_offload; u8 last_hw_ipcso; u8 last_hw_ipcss; u8 last_hw_tucso; u8 last_hw_tucss; #if __FreeBSD_version >= 800000 struct buf_ring *br; #endif /* Interrupt resources */ bus_dma_tag_t txtag; void *tag; struct resource *res; unsigned long tx_irq; unsigned long no_desc_avail; }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct adapter *adapter; u32 me; u32 msix; u32 ims; struct mtx rx_mtx; char mtx_name[16]; u32 payload; struct task rx_task; struct taskqueue *tq; union e1000_rx_desc_extended *rx_base; struct em_dma_alloc rxdma; u32 next_to_refresh; u32 next_to_check; struct em_rxbuffer *rx_buffers; struct mbuf *fmp; struct mbuf *lmp; /* Interrupt resources */ void *tag; struct resource *res; bus_dma_tag_t rxtag; bool discard; /* Soft stats */ unsigned long rx_irq; unsigned long rx_discarded; unsigned long rx_packets; unsigned long rx_bytes; }; /* Our adapter structure */ struct adapter { if_t ifp; struct e1000_hw hw; /* FreeBSD operating-system-specific structures. */ struct e1000_osdep osdep; struct device *dev; struct cdev *led_dev; struct resource *memory; struct resource *flash; struct resource *msix_mem; struct resource *res; void *tag; u32 linkvec; u32 ivars; struct ifmedia media; struct callout timer; int msix; int if_flags; int max_frame_size; int min_frame_size; struct mtx core_mtx; int em_insert_vlan_header; u32 ims; bool in_detach; /* Task for FAST handling */ struct task link_task; struct task que_task; struct taskqueue *tq; /* private task queue */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; u8 num_queues; /* * Transmit rings: * Allocated at run time, an array of rings. */ struct tx_ring *tx_rings; int num_tx_desc; u32 txd_cmd; /* * Receive rings: * Allocated at run time, an array of rings. */ struct rx_ring *rx_rings; int num_rx_desc; u32 rx_process_limit; u32 rx_mbuf_sz; /* Management and WOL features */ u32 wol; bool has_manage; bool has_amt; /* Multicast array memory */ u8 *mta; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. 
*/ u32 shadow_vfta[EM_VFTA_SIZE]; /* Info about the interface */ u16 link_active; u16 fc; u16 link_speed; u16 link_duplex; u32 smartspeed; struct em_int_delay_info tx_int_delay; struct em_int_delay_info tx_abs_int_delay; struct em_int_delay_info rx_int_delay; struct em_int_delay_info rx_abs_int_delay; struct em_int_delay_info tx_itr; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long mbuf_defrag_failed; unsigned long no_tx_dma_setup; unsigned long no_tx_map_avail; unsigned long rx_overruns; unsigned long watchdog_events; struct e1000_hw_stats stats; }; /******************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ********************************************************************************/ typedef struct _em_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } em_vendor_info_t; struct em_txbuffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; struct em_rxbuffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ bus_addr_t paddr; }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 e1000_rx_unrefreshed(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; if (rxr->next_to_check > rxr->next_to_refresh) return (rxr->next_to_check - rxr->next_to_refresh - 1); else return ((adapter->num_rx_desc + rxr->next_to_check) - rxr->next_to_refresh - 1); } #define EM_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF) #define EM_TX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_mtx, _name, "EM TX Lock", MTX_DEF) #define EM_RX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->rx_mtx, _name, "EM RX Lock", MTX_DEF) #define EM_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define EM_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define EM_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define EM_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define EM_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define EM_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define EM_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define EM_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define EM_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #define EM_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED) #define EM_RSSRK_SIZE 4 #define EM_RSSRK_VAL(key, i) (key[(i) * EM_RSSRK_SIZE] | \ key[(i) * EM_RSSRK_SIZE + 1] << 8 | \ key[(i) * EM_RSSRK_SIZE + 2] << 16 | \ key[(i) * EM_RSSRK_SIZE + 3] << 24) #endif /* _EM_H_DEFINED_ */ Index: projects/release-pkg/sys/dev/e1000/if_igb.c =================================================================== --- projects/release-pkg/sys/dev/e1000/if_igb.c (revision 295925) +++ projects/release-pkg/sys/dev/e1000/if_igb.c (revision 295926) @@ -1,6395 +1,6401 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_altq.h" #endif #include "if_igb.h" /********************************************************************* * Driver version: *********************************************************************/ char igb_driver_version[] = "2.5.3-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static igb_vendor_info_t igb_vendor_info_array[] = { {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER, 0, 
0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, 0, 0, 0}, {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *igb_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int igb_probe(device_t); static int igb_attach(device_t); static int igb_detach(device_t); static int igb_shutdown(device_t); static int igb_suspend(device_t); static int igb_resume(device_t); #ifndef IGB_LEGACY_TX static int igb_mq_start(struct ifnet *, struct mbuf *); static int igb_mq_start_locked(struct ifnet *, struct tx_ring *); static void igb_qflush(struct ifnet *); static void igb_deferred_mq_start(void *, int); #else static void igb_start(struct ifnet *); static void igb_start_locked(struct tx_ring *, struct ifnet *ifp); #endif static int igb_ioctl(struct ifnet *, u_long, caddr_t); static uint64_t igb_get_counter(if_t, ift_counter); static void igb_init(void *); static void igb_init_locked(struct adapter *); static void igb_stop(void *); static void igb_media_status(struct ifnet *, struct ifmediareq *); static int igb_media_change(struct ifnet *); static void igb_identify_hardware(struct adapter *); static int igb_allocate_pci_resources(struct adapter *); static int igb_allocate_msix(struct adapter *); static int igb_allocate_legacy(struct adapter *); static int igb_setup_msix(struct adapter *); static void igb_free_pci_resources(struct adapter *); static void igb_local_timer(void *); static void igb_reset(struct adapter *); static int igb_setup_interface(device_t, struct adapter *); static int igb_allocate_queues(struct adapter *); static void igb_configure_queues(struct adapter *); static int igb_allocate_transmit_buffers(struct tx_ring *); static void igb_setup_transmit_structures(struct adapter *); static void igb_setup_transmit_ring(struct tx_ring *); static void igb_initialize_transmit_units(struct adapter *); static void igb_free_transmit_structures(struct adapter *); static void igb_free_transmit_buffers(struct tx_ring *); static int igb_allocate_receive_buffers(struct rx_ring *); static int igb_setup_receive_structures(struct adapter *); static int igb_setup_receive_ring(struct rx_ring *); static void igb_initialize_receive_units(struct adapter *); static void igb_free_receive_structures(struct adapter *); static void 
igb_free_receive_buffers(struct rx_ring *); static void igb_free_receive_ring(struct rx_ring *); static void igb_enable_intr(struct adapter *); static void igb_disable_intr(struct adapter *); static void igb_update_stats_counters(struct adapter *); static bool igb_txeof(struct tx_ring *); static __inline void igb_rx_discard(struct rx_ring *, int); static __inline void igb_rx_input(struct rx_ring *, struct ifnet *, struct mbuf *, u32); static bool igb_rxeof(struct igb_queue *, int, int *); static void igb_rx_checksum(u32, struct mbuf *, u32); static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *); static int igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *); static void igb_set_promisc(struct adapter *); static void igb_disable_promisc(struct adapter *); static void igb_set_multi(struct adapter *); static void igb_update_link_status(struct adapter *); static void igb_refresh_mbufs(struct rx_ring *, int); static void igb_register_vlan(void *, struct ifnet *, u16); static void igb_unregister_vlan(void *, struct ifnet *, u16); static void igb_setup_vlan_hw_support(struct adapter *); static int igb_xmit(struct tx_ring *, struct mbuf **); static int igb_dma_malloc(struct adapter *, bus_size_t, struct igb_dma_alloc *, int); static void igb_dma_free(struct adapter *, struct igb_dma_alloc *); static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void igb_print_nvm_info(struct adapter *); static int igb_is_valid_ether_addr(u8 *); static void igb_add_hw_stats(struct adapter *); static void igb_vf_init_stats(struct adapter *); static void igb_update_vf_stats_counters(struct adapter *); /* Management and WOL Support */ static void igb_init_manageability(struct adapter *); static void igb_release_manageability(struct adapter *); static void igb_get_hw_control(struct adapter *); static void igb_release_hw_control(struct adapter *); static void igb_enable_wakeup(device_t); static void igb_led_func(void *, int); static int igb_irq_fast(void *); static void igb_msix_que(void *); static void igb_msix_link(void *); static void igb_handle_que(void *context, int pending); static void igb_handle_link(void *context, int pending); static void igb_handle_link_locked(struct adapter *); static void igb_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS); static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS); static int igb_sysctl_eee(SYSCTL_HANDLER_ARGS); #ifdef DEVICE_POLLING static poll_handler_t igb_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t igb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, igb_probe), DEVMETHOD(device_attach, igb_attach), DEVMETHOD(device_detach, igb_detach), DEVMETHOD(device_shutdown, igb_shutdown), DEVMETHOD(device_suspend, igb_suspend), DEVMETHOD(device_resume, igb_resume), DEVMETHOD_END }; static driver_t igb_driver = { "igb", igb_methods, sizeof(struct adapter), }; static devclass_t igb_devclass; DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); MODULE_DEPEND(igb, pci, 1, 1, 1); MODULE_DEPEND(igb, ether, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(igb, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ /********************************************************************* * Tunable default values. 
*********************************************************************/ static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters"); /* Descriptor defaults */ static int igb_rxd = IGB_DEFAULT_RXD; static int igb_txd = IGB_DEFAULT_TXD; SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0, "Number of receive descriptors per queue"); SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0, "Number of transmit descriptors per queue"); /* ** AIM: Adaptive Interrupt Moderation ** which means that the interrupt rate ** is varied over time based on the ** traffic for that interrupt vector */ static int igb_enable_aim = TRUE; SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igb_enable_aim, 0, "Enable adaptive interrupt moderation"); /* * MSIX should be the default for best performance, * but this allows it to be forced off for testing. */ static int igb_enable_msix = 1; SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0, "Enable MSI-X interrupts"); /* ** Tunable Interrupt rate */ static int igb_max_interrupt_rate = 8000; SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &igb_max_interrupt_rate, 0, "Maximum interrupts per second"); #ifndef IGB_LEGACY_TX /* ** Tunable number of buffers in the buf-ring (drbr_xxx) */ static int igb_buf_ring_size = IGB_BR_SIZE; SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN, &igb_buf_ring_size, 0, "Size of the bufring"); #endif /* ** Header split causes the packet header to ** be dma'd to a separate mbuf from the payload. ** This can have memory alignment benefits. But ** another plus is that small packets often fit ** into the header and thus use no cluster. It's ** a very workload-dependent feature. */ static int igb_header_split = FALSE; SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0, "Enable receive mbuf header split"); /* ** This will autoconfigure based on the ** number of CPUs and max supported ** MSIX messages if left at 0. */ static int igb_num_queues = 0; SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0, "Number of queues to configure, 0 indicates autoconfigure"); /* ** Global variable to store last used CPU when binding queues ** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a ** queue is bound to a cpu. */ static int igb_last_bind_cpu = -1; /* How many packets rxeof tries to clean at a time */ static int igb_rx_process_limit = 100; SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &igb_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); /* How many packets txeof tries to clean at a time */ static int igb_tx_process_limit = -1; SYSCTL_INT(_hw_igb, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, &igb_tx_process_limit, 0, "Maximum number of sent packets to process at a time, -1 means unlimited"); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include <dev/netmap/if_igb_netmap.h> #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * igb_probe determines if the driver should be loaded on * the adapter based on PCI vendor/device id of the adapter.
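 * (For illustration: a board reporting the Intel vendor id 0x8086 and
 * one of the device ids listed in igb_vendor_info_array matches even
 * though the table stores 0 for subvendor/subdevice, because the
 * lookup loop below treats a zero subvendor_id or subdevice_id as a
 * wildcard.)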
* * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int igb_probe(device_t dev) { char adapter_name[256]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; uint16_t pci_subvendor_id = 0; uint16_t pci_subdevice_id = 0; igb_vendor_info_t *ent; INIT_DEBUGOUT("igb_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != IGB_INTEL_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = igb_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == 0)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == 0))) { sprintf(adapter_name, "%s, Version - %s", igb_strings[ent->index], igb_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int igb_attach(device_t dev) { struct adapter *adapter; int error = 0; u16 eeprom_data; INIT_DEBUGOUT("igb_attach: begin"); if (resource_disabled("igb", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTLs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_nvm_info, "I", "NVM Information"); igb_set_sysctl_value(adapter, "enable_aim", "Interrupt Moderation", &adapter->enable_aim, igb_enable_aim); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_set_flowcntl, "I", "Flow Control"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); /* Determine hardware and mac info */ igb_identify_hardware(adapter); /* Setup PCI resources */ if (igb_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* Do Shared Code initialization */ if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { device_printf(dev, "Setup of Shared code failed\n"); error = ENXIO; goto err_pci; } e1000_get_bus_info(&adapter->hw); /* Sysctls for limiting the amount of work done in the taskqueues */ igb_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, igb_rx_process_limit); igb_set_sysctl_value(adapter, "tx_processing_limit", "max number of tx packets to process", &adapter->tx_process_limit, igb_tx_process_limit); /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be multiple * of E1000_DBA_ALIGN. 
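 * As a worked example (assuming the usual 16-byte descriptor size and
 * an IGB_DBA_ALIGN of 128 bytes): a tunable of 1024 or 1000 descriptors
 * passes, since 1024 * 16 and 1000 * 16 are both multiples of 128,
 * while 1001 would fail the modulo test and be replaced by the
 * default below.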
*/ if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", IGB_DEFAULT_TXD, igb_txd); adapter->num_tx_desc = IGB_DEFAULT_TXD; } else adapter->num_tx_desc = igb_txd; if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", IGB_DEFAULT_RXD, igb_rxd); adapter->num_rx_desc = IGB_DEFAULT_RXD; } else adapter->num_rx_desc = igb_rxd; adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_wait_to_complete = FALSE; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { adapter->hw.phy.mdix = AUTO_ALL_MODES; adapter->hw.phy.disable_polarity_correction = FALSE; adapter->hw.phy.ms_type = IGB_MASTER_SLAVE; } /* * Set the frame limits assuming * standard ethernet sized frames. */ adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* ** Allocate and Setup Queues */ if (igb_allocate_queues(adapter)) { error = ENOMEM; goto err_pci; } /* Allocate the appropriate stats memory */ if (adapter->vf_ifp) { adapter->stats = (struct e1000_vf_stats *)malloc(sizeof \ (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO); igb_vf_init_stats(adapter); } else adapter->stats = (struct e1000_hw_stats *)malloc(sizeof \ (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO); if (adapter->stats == NULL) { device_printf(dev, "Can not allocate stats memory\n"); error = ENOMEM; goto err_late; } /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Some adapter-specific advanced features */ if (adapter->hw.mac.type >= e1000_i350) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dmac", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_dmac, "I", "DMA Coalesce"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_disabled", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); if (adapter->hw.phy.media_type == e1000_media_type_copper) { if (adapter->hw.mac.type == e1000_i354) e1000_set_eee_i354(&adapter->hw, TRUE, TRUE); else e1000_set_eee_i350(&adapter->hw, TRUE, TRUE); } } /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. */ e1000_reset_hw(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (((adapter->hw.mac.type != e1000_i210) && (adapter->hw.mac.type != e1000_i211)) && (e1000_validate_nvm_checksum(&adapter->hw) < 0)) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. 
*/ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* ** Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(&adapter->hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } /* Check its sanity */ if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* Setup OS specific network interface */ if (igb_setup_interface(dev, adapter) != 0) goto err_late; /* Now get a good starting state */ igb_reset(adapter); /* Initialize statistics */ igb_update_stats_counters(adapter); adapter->hw.mac.get_link_status = 1; igb_update_link_status(adapter); /* Indicate SOL/IDER usage */ if (e1000_check_reset_block(&adapter->hw)) device_printf(dev, "PHY reset is blocked due to SOL/IDER session.\n"); /* Determine if we have to control management hardware */ adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); /* * Setup Wake-on-Lan */ /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; if (eeprom_data) adapter->wol = E1000_WUFC_MAG; /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); igb_add_hw_stats(adapter); /* Tell the stack that the interface is not active */ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE; adapter->led_dev = led_create(igb_led_func, adapter, device_get_nameunit(dev)); /* ** Configure Interrupts */ if ((adapter->msix > 1) && (igb_enable_msix)) error = igb_allocate_msix(adapter); else /* MSI or Legacy */ error = igb_allocate_legacy(adapter); if (error) goto err_late; #ifdef DEV_NETMAP igb_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("igb_attach: end"); return (0); err_late: igb_detach(dev); igb_free_transmit_structures(adapter); igb_free_receive_structures(adapter); igb_release_hw_control(adapter); err_pci: igb_free_pci_resources(adapter); if (adapter->ifp != NULL) if_free(adapter->ifp); free(adapter->mta, M_DEVBUF); IGB_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
* * return 0 on success, positive on failure *********************************************************************/ static int igb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("igb_detach: begin"); /* Make sure VLANS are not using driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } ether_ifdetach(adapter->ifp); if (adapter->led_dev != NULL) led_destroy(adapter->led_dev); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif IGB_CORE_LOCK(adapter); adapter->in_detach = 1; igb_stop(adapter); IGB_CORE_UNLOCK(adapter); e1000_phy_hw_reset(&adapter->hw); /* Give control back to firmware */ igb_release_manageability(adapter); igb_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); igb_enable_wakeup(dev); } /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); callout_drain(&adapter->timer); #ifdef DEV_NETMAP netmap_detach(adapter->ifp); #endif /* DEV_NETMAP */ igb_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); igb_free_transmit_structures(adapter); igb_free_receive_structures(adapter); if (adapter->mta != NULL) free(adapter->mta, M_DEVBUF); IGB_CORE_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int igb_shutdown(device_t dev) { return igb_suspend(dev); } /* * Suspend/resume device methods. */ static int igb_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); IGB_CORE_LOCK(adapter); igb_stop(adapter); igb_release_manageability(adapter); igb_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); igb_enable_wakeup(dev); } IGB_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int igb_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; IGB_CORE_LOCK(adapter); igb_init_locked(adapter); igb_init_manageability(adapter); if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } } IGB_CORE_UNLOCK(adapter); return bus_generic_resume(dev); } #ifdef IGB_LEGACY_TX /********************************************************************* * Transmit entry point * * igb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. 
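 * (Concretely: when igb_xmit() below cannot map a frame, the mbuf is
 * pushed back onto if_snd with IFQ_DRV_PREPEND() and the ring is
 * marked IGB_QUEUE_DEPLETED, which the local timer uses to assert
 * IFF_DRV_OACTIVE until descriptors are reclaimed.)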
**********************************************************************/ static void igb_start_locked(struct tx_ring *txr, struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct mbuf *m_head; IGB_TX_LOCK_ASSERT(txr); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; /* Call cleanup if number of TX descriptors low */ if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) igb_txeof(txr); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (txr->tx_avail <= IGB_MAX_SCATTER) { txr->queue_status |= IGB_QUEUE_DEPLETED; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Encapsulation can modify our pointer, and or make it * NULL on failure. In that event, we can't requeue. */ if (igb_xmit(txr, &m_head)) { if (m_head != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m_head); if (txr->tx_avail <= IGB_MAX_SCATTER) txr->queue_status |= IGB_QUEUE_DEPLETED; break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); /* Set watchdog on */ txr->watchdog_time = ticks; txr->queue_status |= IGB_QUEUE_WORKING; } } /* * Legacy TX driver routine, called from the * stack, always uses tx[0], and spins for it. * Should not be used with multiqueue tx */ static void igb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IGB_TX_LOCK(txr); igb_start_locked(txr, ifp); IGB_TX_UNLOCK(txr); } return; } #else /* ~IGB_LEGACY_TX */ /* ** Multiqueue Transmit Entry: ** quick turnaround to the stack ** */ static int igb_mq_start(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; struct igb_queue *que; struct tx_ring *txr; int i, err = 0; #ifdef RSS uint32_t bucket_id; #endif /* Which queue to use */ /* * When doing RSS, map it to the same outbound queue * as the incoming flow would be mapped to. * * If everything is setup correctly, it should be the * same bucket that the current CPU we're on is. */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { #ifdef RSS if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), &bucket_id) == 0) { /* XXX TODO: spit out something if bucket_id > num_queues? */ i = bucket_id % adapter->num_queues; } else { #endif i = m->m_pkthdr.flowid % adapter->num_queues; #ifdef RSS } #endif } else { i = curcpu % adapter->num_queues; } txr = &adapter->tx_rings[i]; que = &adapter->queues[i]; err = drbr_enqueue(ifp, txr->br, m); if (err) return (err); if (IGB_TX_TRYLOCK(txr)) { igb_mq_start_locked(ifp, txr); IGB_TX_UNLOCK(txr); } else taskqueue_enqueue(que->tq, &txr->txq_task); return (0); } static int igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct mbuf *next; int err = 0, enq = 0; IGB_TX_LOCK_ASSERT(txr); if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || adapter->link_active == 0) return (ENETDOWN); /* Process the queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { if ((err = igb_xmit(txr, &next)) != 0) { if (next == NULL) { /* It was freed, move forward */ drbr_advance(ifp, txr->br); } else { /* * Still have one left, it may not be * the same since the transmit function * may have changed it. 
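 * The buf_ring discipline used here: drbr_peek() examines the head
 * entry without removing it, drbr_advance() commits the dequeue once
 * the frame has been handed to the hardware, and drbr_putback() stores
 * the possibly-rewritten mbuf pointer back in the head slot so nothing
 * is lost on failure.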
*/ drbr_putback(ifp, txr->br, next); } break; } drbr_advance(ifp, txr->br); enq++; if (next->m_flags & M_MCAST && adapter->vf_ifp) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (enq > 0) { /* Set the watchdog */ txr->queue_status |= IGB_QUEUE_WORKING; txr->watchdog_time = ticks; } if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) igb_txeof(txr); if (txr->tx_avail <= IGB_MAX_SCATTER) txr->queue_status |= IGB_QUEUE_DEPLETED; return (err); } /* * Called from a taskqueue to drain queued transmit packets. */ static void igb_deferred_mq_start(void *arg, int pending) { struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; IGB_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); IGB_TX_UNLOCK(txr); } /* ** Flush all ring buffers */ static void igb_qflush(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct mbuf *m; for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); IGB_TX_UNLOCK(txr); } if_qflush(ifp); } #endif /* ~IGB_LEGACY_TX */ /********************************************************************* * Ioctl entry point * * igb_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. 
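** For example, adding an IPv4 address to an interface that is already
** IFF_DRV_RUNNING only triggers arp_ifinit() below; the full
** igb_init() (and the link bounce it implies) is reserved for the
** case where the interface is not running yet.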
*/ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) igb_init(adapter); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); IGB_CORE_LOCK(adapter); max_frame_size = 9234; if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { IGB_CORE_UNLOCK(adapter); error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); IGB_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { igb_disable_promisc(adapter); igb_set_promisc(adapter); } } else igb_init_locked(adapter); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) igb_stop(adapter); adapter->if_flags = ifp->if_flags; IGB_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IGB_CORE_LOCK(adapter); igb_disable_intr(adapter); igb_set_multi(adapter); #ifdef DEVICE_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) #endif igb_enable_intr(adapter); IGB_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ IGB_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { IGB_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } IGB_CORE_UNLOCK(adapter); case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(igb_poll, ifp); if (error) return (error); IGB_CORE_LOCK(adapter); igb_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; IGB_CORE_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ IGB_CORE_LOCK(adapter); igb_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; IGB_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; reinit = 1; } if (mask & IFCAP_TSO6) { ifp->if_capenable ^= IFCAP_TSO6; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; reinit = 1; } if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) igb_init(adapter); VLAN_CAPABILITIES(ifp); break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. 
It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void igb_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; INIT_DEBUGOUT("igb_init: begin"); IGB_CORE_LOCK_ASSERT(adapter); igb_disable_intr(adapter); callout_stop(&adapter->timer); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); igb_reset(adapter); igb_update_link_status(adapter); E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Set hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) { ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); #if __FreeBSD_version >= 800000 if ((adapter->hw.mac.type == e1000_82576) || (adapter->hw.mac.type == e1000_82580)) ifp->if_hwassist |= CSUM_SCTP; #endif } if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; /* Clear bad data from Rx FIFOs */ e1000_rx_fifo_flush_82575(&adapter->hw); /* Configure for OS presence */ igb_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ igb_setup_transmit_structures(adapter); igb_initialize_transmit_units(adapter); /* Setup Multicast table */ igb_set_multi(adapter); /* ** Figure out the desired mbuf pool ** for doing jumbo/packetsplit */ if (adapter->max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; /* Prepare receive descriptors and buffers */ if (igb_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); return; } igb_initialize_receive_units(adapter); /* Enable VLAN support */ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) igb_setup_vlan_hw_support(adapter); /* Don't lose promiscuous settings */ igb_set_promisc(adapter); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&adapter->timer, hz, igb_local_timer, adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); if (adapter->msix > 1) /* Set up queue routing */ igb_configure_queues(adapter); /* this clears any pending interrupts */ E1000_READ_REG(&adapter->hw, E1000_ICR); #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. 
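 * (With DEVICE_POLLING compiled in, IFCAP_POLLING is toggled through
 * the SIOCSIFCAP handler above, e.g. by "ifconfig igb0 polling"; the
 * rx/tx cleanup then happens in igb_poll() and the hardware interrupt
 * stays masked here.)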
*/ if (ifp->if_capenable & IFCAP_POLLING) igb_disable_intr(adapter); else #endif /* DEVICE_POLLING */ { igb_enable_intr(adapter); E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC); } /* Set Energy Efficient Ethernet */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { if (adapter->hw.mac.type == e1000_i354) e1000_set_eee_i354(&adapter->hw, TRUE, TRUE); else e1000_set_eee_i350(&adapter->hw, TRUE, TRUE); } } static void igb_init(void *arg) { struct adapter *adapter = arg; IGB_CORE_LOCK(adapter); igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_handle_que(void *context, int pending) { struct igb_queue *que = context; struct adapter *adapter = que->adapter; struct tx_ring *txr = que->txr; struct ifnet *ifp = adapter->ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bool more; more = igb_rxeof(que, adapter->rx_process_limit, NULL); IGB_TX_LOCK(txr); igb_txeof(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); /* Do we need another? */ if (more) { taskqueue_enqueue(que->tq, &que->que_task); return; } } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) return; #endif /* Reenable this interrupt */ if (que->eims) E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); else igb_enable_intr(adapter); } /* Deal with link in a sleepable context */ static void igb_handle_link(void *context, int pending) { struct adapter *adapter = context; IGB_CORE_LOCK(adapter); igb_handle_link_locked(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_handle_link_locked(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; IGB_CORE_LOCK_ASSERT(adapter); adapter->hw.mac.get_link_status = 1; igb_update_link_status(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } } } /********************************************************************* * * MSI/Legacy Deferred * Interrupt Service routine * *********************************************************************/ static int igb_irq_fast(void *arg) { struct adapter *adapter = arg; struct igb_queue *que = adapter->queues; u32 reg_icr; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; /* * Mask interrupts until the taskqueue is finished running. This is * cheap, just assume that it is needed. This also works around the * MSI message reordering errata on certain systems. 
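 * The resulting handoff: this filter masks the device, queues
 * que_task and returns FILTER_HANDLED; igb_handle_que() re-enables
 * the interrupt (via EIMS or the full mask) only once the deferred
 * rx/tx work is done.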
*/ igb_disable_intr(adapter); taskqueue_enqueue(que->tq, &que->que_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) taskqueue_enqueue(que->tq, &adapter->link_task); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } #ifdef DEVICE_POLLING #if __FreeBSD_version >= 800000 #define POLL_RETURN_COUNT(a) (a) static int #else #define POLL_RETURN_COUNT(a) static void #endif igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; struct igb_queue *que; struct tx_ring *txr; u32 reg_icr, rx_done = 0; u32 loop = IGB_MAX_LOOP; bool more; IGB_CORE_LOCK(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { IGB_CORE_UNLOCK(adapter); return POLL_RETURN_COUNT(rx_done); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) igb_handle_link_locked(adapter); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; } IGB_CORE_UNLOCK(adapter); for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; txr = que->txr; igb_rxeof(que, count, &rx_done); IGB_TX_LOCK(txr); do { more = igb_txeof(txr); } while (loop-- && more); #ifndef IGB_LEGACY_TX if (!drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } return POLL_RETURN_COUNT(rx_done); } #endif /* DEVICE_POLLING */ /********************************************************************* * * MSIX Que Interrupt Service routine * **********************************************************************/ static void igb_msix_que(void *arg) { struct igb_queue *que = arg; struct adapter *adapter = que->adapter; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = que->txr; struct rx_ring *rxr = que->rxr; u32 newitr = 0; bool more_rx; /* Ignore spurious interrupts */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims); ++que->irqs; IGB_TX_LOCK(txr); igb_txeof(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL); if (adapter->enable_aim == FALSE) goto no_calc; /* ** Do Adaptive Interrupt Moderation: ** - Write out last calculated setting ** - Calculate based on average size over ** the last interval. 
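** A worked example of the code below: with an average frame size of
** 1500 bytes on a gigabit link, newitr = 1500 + 24 = 1524 (under the
** 3000 cap), halved to 762 because it falls outside the 300-1200 mid
** range, then masked to 760; a 64-byte average yields (64 + 24) / 2 = 44.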
*/ if (que->eitr_setting) E1000_WRITE_REG(&adapter->hw, E1000_EITR(que->msix), que->eitr_setting); que->eitr_setting = 0; /* Idle, do nothing */ if ((txr->bytes == 0) && (rxr->bytes == 0)) goto no_calc; /* Used half Default if sub-gig */ if (adapter->link_speed != 1000) newitr = IGB_DEFAULT_ITR / 2; else { if ((txr->bytes) && (txr->packets)) newitr = txr->bytes/txr->packets; if ((rxr->bytes) && (rxr->packets)) newitr = max(newitr, (rxr->bytes / rxr->packets)); newitr += 24; /* account for hardware frame, crc */ /* set an upper boundary */ newitr = min(newitr, 3000); /* Be nice to the mid range */ if ((newitr > 300) && (newitr < 1200)) newitr = (newitr / 3); else newitr = (newitr / 2); } newitr &= 0x7FFC; /* Mask invalid bits */ if (adapter->hw.mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; /* save for next interrupt */ que->eitr_setting = newitr; /* Reset state */ txr->bytes = 0; txr->packets = 0; rxr->bytes = 0; rxr->packets = 0; no_calc: /* Schedule a clean task if needed*/ if (more_rx) taskqueue_enqueue(que->tq, &que->que_task); else /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); return; } /********************************************************************* * * MSIX Link Interrupt Service routine * **********************************************************************/ static void igb_msix_link(void *arg) { struct adapter *adapter = arg; u32 icr; ++adapter->link_irq; icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (!(icr & E1000_ICR_LSC)) goto spurious; igb_handle_link(adapter, 0); spurious: /* Rearm */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("igb_media_status: begin"); IGB_CORE_LOCK(adapter); igb_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { IGB_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: /* ** Support for 100Mb SFP - these are Fiber ** but the media type appears as serdes */ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) ifmr->ifm_active |= IFM_100_FX; else ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; case 2500: ifmr->ifm_active |= IFM_2500_SX; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; IGB_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. 
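 * For example, "ifconfig igb0 media 100baseTX mediaopt full-duplex"
 * arrives here as IFM_100_TX with IFM_FDX set, which disables
 * autonegotiation and forces ADVERTISE_100_FULL below.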
* **********************************************************************/ static int igb_media_change(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("igb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); IGB_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to Advanced TX descriptors. * **********************************************************************/ static int igb_xmit(struct tx_ring *txr, struct mbuf **m_headp) { struct adapter *adapter = txr->adapter; u32 olinfo_status = 0, cmd_type_len; int i, j, error, nsegs; int first; bool remap = TRUE; struct mbuf *m_head; bus_dma_segment_t segs[IGB_MAX_SCATTER]; bus_dmamap_t map; struct igb_tx_buf *txbuf; union e1000_adv_tx_desc *txd = NULL; m_head = *m_headp; /* Basic descriptor defines */ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT); if (m_head->m_flags & M_VLANTAG) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; /* * Important to capture the first descriptor * used because it will contain the index of * the one we tell the hardware to report back */ first = txr->next_avail_desc; txbuf = &txr->tx_buffers[first]; map = txbuf->map; /* * Map the packet for DMA. */ retry: error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error)) { struct mbuf *m; switch (error) { case EFBIG: /* Try it again? 
- one try */ if (remap == TRUE) { remap = FALSE; m = m_collapse(*m_headp, M_NOWAIT, IGB_MAX_SCATTER); if (m == NULL) { adapter->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; goto retry; } else return (error); default: txr->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } } /* Make certain there are enough descriptors */ if (nsegs > txr->tx_avail - 2) { txr->no_desc_avail++; bus_dmamap_unload(txr->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* ** Set up the appropriate offload context ** this will consume the first descriptor */ error = igb_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); if (__predict_false(error)) { m_freem(*m_headp); *m_headp = NULL; return (error); } /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) olinfo_status |= txr->me << 4; i = txr->next_avail_desc; for (j = 0; j < nsegs; j++) { bus_size_t seglen; bus_addr_t segaddr; txbuf = &txr->tx_buffers[i]; txd = &txr->tx_base[i]; seglen = segs[j].ds_len; segaddr = htole64(segs[j].ds_addr); txd->read.buffer_addr = segaddr; txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS | cmd_type_len | seglen); txd->read.olinfo_status = htole32(olinfo_status); if (++i == txr->num_desc) i = 0; } txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); txr->tx_avail -= nsegs; txr->next_avail_desc = i; txbuf->m_head = m_head; /* ** Here we swap the map so the last descriptor, ** which gets the completion interrupt has the ** real map, and the first descriptor gets the ** unused map from this descriptor. */ txr->tx_buffers[first].map = txbuf->map; txbuf->map = map; bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); /* Set the EOP descriptor that will be marked done */ txbuf = &txr->tx_buffers[first]; txbuf->eop = txd; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Advance the Transmit Descriptor Tail (Tdt), this tells the * hardware that this frame is available to transmit. 
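 * For instance, if the frame mapped to three segments starting at
 * slot 10, slots 10-12 now hold the data descriptors and i has
 * advanced to 13; writing 13 to TDT hands everything between the
 * hardware head and the new tail to the MAC, and the RS/EOP bits set
 * above make it write back completion status on the last descriptor,
 * which igb_txeof() later checks via txbuf->eop.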
*/ ++txr->total_packets; E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); return (0); } static void igb_set_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 reg; if (adapter->vf_ifp) { e1000_promisc_set_vf(hw, e1000_promisc_enabled); return; } reg = E1000_READ_REG(hw, E1000_RCTL); if (ifp->if_flags & IFF_PROMISC) { reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, reg); } else if (ifp->if_flags & IFF_ALLMULTI) { reg |= E1000_RCTL_MPE; reg &= ~E1000_RCTL_UPE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } } static void igb_disable_promisc(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; u32 reg; int mcnt = 0; if (adapter->vf_ifp) { e1000_promisc_set_vf(hw, e1000_promisc_disabled); return; } reg = E1000_READ_REG(hw, E1000_RCTL); reg &= (~E1000_RCTL_UPE); if (ifp->if_flags & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else { struct ifmultiaddr *ifma; #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif } /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg &= (~E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, reg); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void igb_set_multi(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct ifmultiaddr *ifma; u32 reg_rctl = 0; u8 *mta; int mcnt = 0; IOCTL_DEBUGOUT("igb_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); } /********************************************************************* * Timer routine: * This routine checks for link status, * updates statistics, and does the watchdog. 
* **********************************************************************/ static void igb_local_timer(void *arg) { struct adapter *adapter = arg; device_t dev = adapter->dev; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct igb_queue *que = adapter->queues; int hung = 0, busy = 0; IGB_CORE_LOCK_ASSERT(adapter); igb_update_link_status(adapter); igb_update_stats_counters(adapter); /* ** Check the TX queues status ** - central locked handling of OACTIVE ** - watchdog only if all queues show hung */ for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { if ((txr->queue_status & IGB_QUEUE_HUNG) && (adapter->pause_frames == 0)) ++hung; if (txr->queue_status & IGB_QUEUE_DEPLETED) ++busy; if ((txr->queue_status & IGB_QUEUE_IDLE) == 0) taskqueue_enqueue(que->tq, &que->que_task); } if (hung == adapter->num_queues) goto timeout; if (busy == adapter->num_queues) ifp->if_drv_flags |= IFF_DRV_OACTIVE; else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && (busy < adapter->num_queues)) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; adapter->pause_frames = 0; callout_reset(&adapter->timer, hz, igb_local_timer, adapter); #ifndef DEVICE_POLLING /* Schedule all queue interrupts - deadlock protection */ E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask); #endif return; timeout: device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); device_printf(dev,"TX(%d) desc avail = %d," "Next TX to Clean = %d\n", txr->me, txr->tx_avail, txr->next_to_clean); adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; igb_init_locked(adapter); } static void igb_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fc_info *fc = &hw->fc; struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; u32 link_check, thstat, ctrl; char *flowctl = NULL; link_check = thstat = ctrl = 0; /* Get the cached link value or read for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; } else link_check = TRUE; break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; /* VF device is type_unknown */ case e1000_media_type_unknown: e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; /* Fall thru */ default: break; } /* Check for thermal downshift or shutdown */ if (hw->mac.type == e1000_i350) { thstat = E1000_READ_REG(hw, E1000_THSTAT); ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); } /* Get the flow control for display */ switch (fc->current_mode) { case e1000_fc_rx_pause: flowctl = "RX"; break; case e1000_fc_tx_pause: flowctl = "TX"; break; case e1000_fc_full: flowctl = "Full"; break; case e1000_fc_none: default: flowctl = "None"; break; } /* Now we check if a transition has happened */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); if (bootverbose) device_printf(dev, "Link is up %d Mbps %s," " Flow Control: %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? 
"Full Duplex" : "Half Duplex"), flowctl); adapter->link_active = 1; ifp->if_baudrate = adapter->link_speed * 1000000; if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && (thstat & E1000_THSTAT_LINK_THROTTLE)) device_printf(dev, "Link: thermal downshift\n"); /* Delay Link Up for Phy update */ if (((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) && (hw->phy.id == I210_I_PHY_ID)) msec_delay(I210_LINK_DELAY); /* Reset if the media type changed. */ if (hw->dev_spec._82575.media_changed) { hw->dev_spec._82575.media_changed = false; adapter->flags |= IGB_MEDIA_RESET; igb_reset(adapter); } /* This can sleep */ if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { ifp->if_baudrate = adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && (thstat & E1000_THSTAT_PWR_DOWN)) device_printf(dev, "Link: thermal shutdown\n"); adapter->link_active = 0; /* This can sleep */ if_link_state_change(ifp, LINK_STATE_DOWN); /* Reset queue state */ for (int i = 0; i < adapter->num_queues; i++, txr++) txr->queue_status = IGB_QUEUE_IDLE; } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void igb_stop(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; IGB_CORE_LOCK_ASSERT(adapter); INIT_DEBUGOUT("igb_stop: begin"); igb_disable_intr(adapter); callout_stop(&adapter->timer); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Disarm watchdog timer. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); txr->queue_status = IGB_QUEUE_IDLE; IGB_TX_UNLOCK(txr); } e1000_reset_hw(&adapter->hw); E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void igb_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ pci_enable_busmaster(dev); adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Set MAC type early for PCI setup */ e1000_set_mac_type(&adapter->hw); /* Are we a VF device? 
*/ if ((adapter->hw.mac.type == e1000_vfadapt) || (adapter->hw.mac.type == e1000_vfadapt_i350)) adapter->vf_ifp = 1; else adapter->vf_ifp = 0; } static int igb_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int rid; rid = PCIR_BAR(0); adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->pci_mem == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->pci_mem); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->num_queues = 1; /* Defaults for Legacy or MSI */ /* This will setup either MSI/X or MSI */ adapter->msix = igb_setup_msix(adapter); adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ static int igb_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = adapter->queues; #ifndef IGB_LEGACY_TX struct tx_ring *txr = adapter->tx_rings; #endif int error, rid = 0; /* Turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* MSI RID is 1 */ if (adapter->msix == 1) rid = 1; /* We allocate a single interrupt resource */ adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } #ifndef IGB_LEGACY_TX TASK_INIT(&txr->txq_task, 0, igb_deferred_mq_start, txr); #endif /* * Try allocating a fast interrupt and the associated deferred * processing contexts. */ TASK_INIT(&que->que_task, 0, igb_handle_que, que); /* Make tasklet for deferred link handling */ TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter); que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq", device_get_nameunit(adapter->dev)); if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(que->tq); que->tq = NULL; return (error); } return (0); } /********************************************************************* * * Setup the MSIX Queue Interrupt handlers: * **********************************************************************/ static int igb_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = adapter->queues; int error, rid, vector = 0; int cpu_id = 0; #ifdef RSS cpuset_t cpu_mask; #endif /* Be sure to start with all interrupts disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(&adapter->hw); #ifdef RSS /* * If we're doing RSS, the number of queues needs to * match the number of RSS buckets that are configured. * * + If there's more queues than RSS buckets, we'll end * up with queues that get no traffic. * * + If there's more RSS buckets than queues, we'll end * up having multiple RSS buckets map to the same queue, * so there'll be some contention. 
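 * Illustrative example: with 8 RSS buckets and 4 queues, buckets 0-3 and
 * 4-7 both land on queues 0-3, so two buckets contend for each queue;
 * with 2 buckets and 4 queues, queues 2 and 3 would see no RSS traffic.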
*/ if (adapter->num_queues != rss_getnumbuckets()) { device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d)" "; performance will be impacted.\n", __func__, adapter->num_queues, rss_getnumbuckets()); } #endif for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { rid = vector +1; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (que->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Queue Interrupt\n"); return (ENXIO); } error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, igb_msix_que, que, &que->tag); if (error) { que->res = NULL; device_printf(dev, "Failed to register Queue handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, que->res, que->tag, "que %d", i); #endif que->msix = vector; if (adapter->hw.mac.type == e1000_82575) que->eims = E1000_EICR_TX_QUEUE0 << i; else que->eims = 1 << vector; #ifdef RSS /* * The queue ID is used as the RSS layer bucket ID. * We look up the queue ID -> RSS CPU ID and select * that. */ cpu_id = rss_getcpu(i % rss_getnumbuckets()); #else /* * Bind the msix vector, and thus the * rings to the corresponding cpu. * * This just happens to match the default RSS round-robin * bucket -> queue -> CPU allocation. */ if (adapter->num_queues > 1) { if (igb_last_bind_cpu < 0) igb_last_bind_cpu = CPU_FIRST(); cpu_id = igb_last_bind_cpu; } #endif if (adapter->num_queues > 1) { bus_bind_intr(dev, que->res, cpu_id); #ifdef RSS device_printf(dev, "Bound queue %d to RSS bucket %d\n", i, cpu_id); #else device_printf(dev, "Bound queue %d to cpu %d\n", i, cpu_id); #endif } #ifndef IGB_LEGACY_TX TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start, que->txr); #endif /* Make tasklet for deferred handling */ TASK_INIT(&que->que_task, 0, igb_handle_que, que); que->tq = taskqueue_create("igb_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); if (adapter->num_queues > 1) { /* * Only pin the taskqueue thread to a CPU if * RSS is in use. * * This again just happens to match the default RSS * round-robin bucket -> queue -> CPU allocation. 
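 * (Without RSS the per-queue thread below is started unpinned; with RSS
 * it is pinned via a cpuset to the same cpu_id the MSI-X vector was
 * bound to above.)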
*/ #ifdef RSS CPU_SETOF(cpu_id, &cpu_mask); taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, &cpu_mask, "%s que (bucket %d)", device_get_nameunit(adapter->dev), cpu_id); #else taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que (qid %d)", device_get_nameunit(adapter->dev), cpu_id); #endif } else { taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(adapter->dev)); } /* Finally update the last bound CPU id */ if (adapter->num_queues > 1) igb_last_bind_cpu = CPU_NEXT(igb_last_bind_cpu); } /* And Link */ rid = vector + 1; adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Link Interrupt\n"); return (ENXIO); } if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, igb_msix_link, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register Link handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif adapter->linkvec = vector; return (0); } static void igb_configure_queues(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct igb_queue *que; u32 tmp, ivar = 0, newitr = 0; /* First turn on RSS capability */ if (adapter->hw.mac.type != e1000_82575) E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME | E1000_GPIE_PBA | E1000_GPIE_NSICR); /* Turn on MSIX */ switch (adapter->hw.mac.type) { case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: case e1000_vfadapt: case e1000_vfadapt_i350: /* RX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i & 1) { ivar &= 0xFF00FFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 16; } else { ivar &= 0xFFFFFF00; ivar |= que->msix | E1000_IVAR_VALID; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); } /* TX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i & 1) { ivar &= 0x00FFFFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 24; } else { ivar &= 0xFFFF00FF; ivar |= (que->msix | E1000_IVAR_VALID) << 8; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82576: /* RX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i < 8) { ivar &= 0xFFFFFF00; ivar |= que->msix | E1000_IVAR_VALID; } else { ivar &= 0xFF00FFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 16; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* TX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i < 8) { ivar &= 0xFFFF00FF; ivar |= (que->msix | E1000_IVAR_VALID) << 8; } else { ivar &= 0x00FFFFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 24; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 
8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82575: /* enable MSI-X support*/ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask interrupts upon ICR read. */ tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); /* Queues */ for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; tmp = E1000_EICR_RX_QUEUE0 << i; tmp |= E1000_EICR_TX_QUEUE0 << i; que->eims = tmp; E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), i, que->eims); adapter->que_mask |= que->eims; } /* Link */ E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec), E1000_EIMS_OTHER); adapter->link_mask |= E1000_EIMS_OTHER; default: break; } /* Set the starting interrupt rate */ if (igb_max_interrupt_rate > 0) newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC; if (hw->mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr); } return; } static void igb_free_pci_resources(struct adapter *adapter) { struct igb_queue *que = adapter->queues; device_t dev = adapter->dev; int rid; /* ** There is a slight possibility of a failure mode ** in attach that will result in entering this function ** before interrupt resources have been initialized, and ** in that case we do not want to execute the loops below ** We can detect this reliably by the state of the adapter ** res pointer. */ if (adapter->res == NULL) goto mem; /* * First release all the interrupt resources: */ for (int i = 0; i < adapter->num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); que->tag = NULL; } if (que->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); } /* Clean the Legacy or Link interrupt last */ if (adapter->linkvec) /* we are doing MSIX */ rid = adapter->linkvec + 1; else (adapter->msix != 0) ? (rid = 1):(rid = 0); que = adapter->queues; if (adapter->tag != NULL) { taskqueue_drain(que->tq, &adapter->link_task); bus_teardown_intr(dev, adapter->res, adapter->tag); adapter->tag = NULL; } if (adapter->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); for (int i = 0; i < adapter->num_queues; i++, que++) { if (que->tq != NULL) { #ifndef IGB_LEGACY_TX taskqueue_drain(que->tq, &que->txr->txq_task); #endif taskqueue_drain(que->tq, &que->que_task); taskqueue_free(que->tq); } } mem: if (adapter->msix) pci_release_msi(dev); if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, adapter->memrid, adapter->msix_mem); if (adapter->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->pci_mem); } /* * Setup Either MSI/X or MSI */ static int igb_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int bar, want, queues, msgs, maxqueues; /* tuneable override */ if (igb_enable_msix == 0) goto msi; /* First try MSI/X */ msgs = pci_msix_count(dev); if (msgs == 0) goto msi; /* ** Some new devices, as with ixgbe, now may ** use a different BAR, so we need to keep ** track of which is used. 
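** If the BAR register at IGB_MSIX_BAR reads back as zero, the MSI-X
** table presumably lives in the following BAR, hence the memrid += 4
** fallback just below.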
*/ adapter->memrid = PCIR_BAR(IGB_MSIX_BAR); bar = pci_read_config(dev, adapter->memrid, 4); if (bar == 0) /* use next bar */ adapter->memrid += 4; adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &adapter->memrid, RF_ACTIVE); if (adapter->msix_mem == NULL) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table \n"); goto msi; } queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus; /* Override via tuneable */ if (igb_num_queues != 0) queues = igb_num_queues; #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif /* Sanity check based on HW */ switch (adapter->hw.mac.type) { case e1000_82575: maxqueues = 4; break; case e1000_82576: case e1000_82580: case e1000_i350: case e1000_i354: maxqueues = 8; break; case e1000_i210: maxqueues = 4; break; case e1000_i211: maxqueues = 2; break; default: /* VF interfaces */ maxqueues = 1; break; } /* Final clamp on the actual hardware capability */ if (queues > maxqueues) queues = maxqueues; /* ** One vector (RX/TX pair) per queue ** plus an additional for Link interrupt */ want = queues + 1; if (msgs >= want) msgs = want; else { device_printf(adapter->dev, "MSIX Configuration Problem, " "%d vectors configured, but %d queues wanted!\n", msgs, want); goto msi; } if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { device_printf(adapter->dev, "Using MSIX interrupts with %d vectors\n", msgs); adapter->num_queues = queues; return (msgs); } /* ** If MSIX alloc failed or provided us with ** less than needed, free and fall through to MSI */ pci_release_msi(dev); msi: if (adapter->msix_mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); adapter->msix_mem = NULL; } msgs = 1; if (pci_alloc_msi(dev, &msgs) == 0) { device_printf(adapter->dev," Using an MSI interrupt\n"); return (msgs); } device_printf(adapter->dev," Using a Legacy interrupt\n"); return (0); } /********************************************************************* * * Initialize the DMA Coalescing feature * **********************************************************************/ static void igb_init_dmac(struct adapter *adapter, u32 pba) { device_t dev = adapter->dev; struct e1000_hw *hw = &adapter->hw; u32 dmac, reg = ~E1000_DMACR_DMAC_EN; u16 hwm; if (hw->mac.type == e1000_i211) return; if (hw->mac.type > e1000_82580) { if (adapter->dmac == 0) { /* Disabling it */ E1000_WRITE_REG(hw, E1000_DMACR, reg); return; } else device_printf(dev, "DMA Coalescing enabled\n"); /* Set starting threshold */ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); hwm = 64 * pba - adapter->max_frame_size / 16; if (hwm < 64 * (pba - 6)) hwm = 64 * (pba - 6); reg = E1000_READ_REG(hw, E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) & E1000_FCRTC_RTH_COAL_MASK); E1000_WRITE_REG(hw, E1000_FCRTC, reg); dmac = pba - adapter->max_frame_size / 512; if (dmac < pba - 10) dmac = pba - 10; reg = E1000_READ_REG(hw, E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; reg = ((dmac << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); /* Check if status is 2.5Gb backplane connection * before configuration of watchdog timer, which is * in msec values in 12.8usec intervals * watchdog timer= msec values in 32usec intervals * for non 2.5Gb connection */ if (hw->mac.type == e1000_i354) { int status = E1000_READ_REG(hw, 
E1000_STATUS); if ((status & E1000_STATUS_2P5_SKU) && (!(status & E1000_STATUS_2P5_SKU_OVER))) reg |= ((adapter->dmac * 5) >> 6); else reg |= (adapter->dmac >> 5); } else { reg |= (adapter->dmac >> 5); } E1000_WRITE_REG(hw, E1000_DMACR, reg); E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); /* Set the interval before transition */ reg = E1000_READ_REG(hw, E1000_DMCTLX); if (hw->mac.type == e1000_i350) reg |= IGB_DMCTLX_DCFLUSH_DIS; /* ** in 2.5Gb connection, TTLX unit is 0.4 usec ** which is 0x4*2 = 0xA. But delay is still 4 usec */ if (hw->mac.type == e1000_i354) { int status = E1000_READ_REG(hw, E1000_STATUS); if ((status & E1000_STATUS_2P5_SKU) && (!(status & E1000_STATUS_2P5_SKU_OVER))) reg |= 0xA; else reg |= 0x4; } else { reg |= 0x4; } E1000_WRITE_REG(hw, E1000_DMCTLX, reg); /* free space in tx packet buffer to wake from DMA coal */ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_TXPBSIZE - (2 * adapter->max_frame_size)) >> 6); /* make low power state decision controlled by DMA coal */ reg = E1000_READ_REG(hw, E1000_PCIEMISC); reg &= ~E1000_PCIEMISC_LX_DECISION; E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); } else if (hw->mac.type == e1000_82580) { u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); E1000_WRITE_REG(hw, E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); E1000_WRITE_REG(hw, E1000_DMACR, 0); } } /********************************************************************* * * Set up an fresh starting state * **********************************************************************/ static void igb_reset(struct adapter *adapter) { device_t dev = adapter->dev; struct e1000_hw *hw = &adapter->hw; struct e1000_fc_info *fc = &hw->fc; struct ifnet *ifp = adapter->ifp; u32 pba = 0; u16 hwm; INIT_DEBUGOUT("igb_reset: begin"); /* Let the firmware know the OS is in control */ igb_get_hw_control(adapter); /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. */ switch (hw->mac.type) { case e1000_82575: pba = E1000_PBA_32K; break; case e1000_82576: case e1000_vfadapt: pba = E1000_READ_REG(hw, E1000_RXPBS); pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82580: case e1000_i350: case e1000_i354: case e1000_vfadapt_i350: pba = E1000_READ_REG(hw, E1000_RXPBS); pba = e1000_rxpbs_adjust_82580(pba); break; case e1000_i210: case e1000_i211: pba = E1000_PBA_34K; default: break; } /* Special needs in case of Jumbo frames */ if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) { u32 tx_space, min_tx, min_rx; pba = E1000_READ_REG(hw, E1000_PBA); tx_space = pba >> 16; pba &= 0xffff; min_tx = (adapter->max_frame_size + sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2; min_tx = roundup2(min_tx, 1024); min_tx >>= 10; min_rx = adapter->max_frame_size; min_rx = roundup2(min_rx, 1024); min_rx >>= 10; if (tx_space < min_tx && ((min_tx - tx_space) < pba)) { pba = pba - (min_tx - tx_space); /* * if short on rx space, rx wins * and must trump tx adjustment */ if (pba < min_rx) pba = min_rx; } E1000_WRITE_REG(hw, E1000_PBA, pba); } INIT_DEBUGOUT1("igb_init: pba=%dK",pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. 
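 * Worked example (illustrative, assuming pba = 34 KB, i.e. E1000_PBA_34K,
 * and a 1522-byte max frame): hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
 * = min(31334, 31772) = 31334; on 82576 and later this is masked to
 * 16-byte granularity, giving high_water = 31328 and low_water = 31312.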
*/ hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * adapter->max_frame_size)); if (hw->mac.type < e1000_82576) { fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; } else { fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; } fc->pause_time = IGB_FC_PAUSE_TIME; fc->send_xon = TRUE; if (adapter->fc) fc->requested_mode = adapter->fc; else fc->requested_mode = e1000_fc_default; /* Issue a global reset */ e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); /* Reset for AutoMediaDetect */ if (adapter->flags & IGB_MEDIA_RESET) { e1000_setup_init_funcs(hw, TRUE); e1000_get_bus_info(hw); adapter->flags &= ~IGB_MEDIA_RESET; } if (e1000_init_hw(hw) < 0) device_printf(dev, "Hardware Initialization Failed\n"); /* Setup DMA Coalescing */ igb_init_dmac(adapter, pba); E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); return; } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int igb_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("igb_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = igb_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = igb_ioctl; ifp->if_get_counter = igb_get_counter; + + /* TSO parameters */ + ifp->if_hw_tsomax = IP_MAXPACKET; + ifp->if_hw_tsomaxsegcount = IGB_MAX_SCATTER; + ifp->if_hw_tsomaxsegsize = IGB_TSO_SEG_SIZE; + #ifndef IGB_LEGACY_TX ifp->if_transmit = igb_mq_start; ifp->if_qflush = igb_qflush; #else ifp->if_start = igb_start; IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; IFQ_SET_READY(&ifp->if_snd); #endif ether_ifattach(ifp, adapter->hw.mac.addr); ifp->if_capabilities = ifp->if_capenable = 0; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_TSO; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; /* Don't enable LRO by default */ ifp->if_capabilities |= IFCAP_LRO; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we * support full VLAN capability. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the igb driver you can ** enable this and get full hardware tag filtering. 
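** The capability is therefore only advertised below and left out of
** if_capenable; the administrator has to switch it on explicitly (for
** example with ifconfig's vlanhwfilter option, assuming the stock
** FreeBSD capability name).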
*/ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, igb_media_change, igb_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* * Manage DMA'able memory. */ static void igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int igb_dma_malloc(struct adapter *adapter, bus_size_t size, struct igb_dma_alloc *dma, int mapflags) { int error; error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ IGB_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); goto fail_2; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (error); } static void igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_paddr != 0) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); dma->dma_paddr = 0; } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_vaddr = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate memory for the transmit and receive rings, and then * the descriptors associated with each, called only once at attach. 
* **********************************************************************/ static int igb_allocate_queues(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = NULL; struct tx_ring *txr = NULL; struct rx_ring *rxr = NULL; int rsize, tsize, error = E1000_SUCCESS; int txconf = 0, rxconf = 0; /* First allocate the top level queue structs */ if (!(adapter->queues = (struct igb_queue *) malloc(sizeof(struct igb_queue) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } /* Next allocate the TX ring struct memory */ if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto tx_fail; } /* Now allocate the RX */ if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } tsize = roundup2(adapter->num_tx_desc * sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN); /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_queues; i++, txconf++) { /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; txr->num_desc = adapter->num_tx_desc; /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); if (igb_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; } txr->tx_base = (union e1000_adv_tx_desc *)txr->txdma.dma_vaddr; bzero((void *)txr->tx_base, tsize); /* Now allocate transmit buffers for the ring */ if (igb_allocate_transmit_buffers(txr)) { device_printf(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; } #ifndef IGB_LEGACY_TX /* Allocate a buf ring */ txr->br = buf_ring_alloc(igb_buf_ring_size, M_DEVBUF, M_WAITOK, &txr->tx_mtx); #endif } /* * Next the RX queues... 
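 * (rsize, like tsize above, rounds the descriptor area up to
 * IGB_DBA_ALIGN, presumably the descriptor base-address alignment the
 * controller requires.)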
*/ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); for (int i = 0; i < adapter->num_queues; i++, rxconf++) { rxr = &adapter->rx_rings[i]; rxr->adapter = adapter; rxr->me = i; /* Initialize the RX lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); if (igb_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate RxDescriptor memory\n"); error = ENOMEM; goto err_rx_desc; } rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr; bzero((void *)rxr->rx_base, rsize); /* Allocate receive buffers for the ring*/ if (igb_allocate_receive_buffers(rxr)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); error = ENOMEM; goto err_rx_desc; } } /* ** Finally set up the queue holding structs */ for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; que->adapter = adapter; que->txr = &adapter->tx_rings[i]; que->rxr = &adapter->rx_rings[i]; } return (0); err_rx_desc: for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) igb_dma_free(adapter, &rxr->rxdma); err_tx_desc: for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) igb_dma_free(adapter, &txr->txdma); free(adapter->rx_rings, M_DEVBUF); rx_fail: #ifndef IGB_LEGACY_TX buf_ring_free(txr->br, M_DEVBUF); #endif free(adapter->tx_rings, M_DEVBUF); tx_fail: free(adapter->queues, M_DEVBUF); fail: return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. * **********************************************************************/ static int igb_allocate_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; device_t dev = adapter->dev; struct igb_tx_buf *txbuf; int error, i; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IGB_TSO_SIZE, /* maxsize */ IGB_MAX_SCATTER, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->txtag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); goto fail; } if (!(txr->tx_buffers = (struct igb_tx_buf *) malloc(sizeof(struct igb_tx_buf) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } return 0; fail: /* We free all, it handles case where we are in the middle */ igb_free_transmit_structures(adapter); return (error); } /********************************************************************* * * Initialize a transmit ring. 
* **********************************************************************/ static void igb_setup_transmit_ring(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct igb_tx_buf *txbuf; int i; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ /* Clear the old descriptor contents */ IGB_TX_LOCK(txr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_TX, txr->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ txr->next_avail_desc = 0; txr->next_to_clean = 0; /* Free any existing tx buffers. */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); /* no need to set the address */ netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si)); } #endif /* DEV_NETMAP */ /* clear the watch index */ txbuf->eop = NULL; } /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IGB_TX_UNLOCK(txr); } /********************************************************************* * * Initialize all transmit rings. * **********************************************************************/ static void igb_setup_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) igb_setup_transmit_ring(txr); return; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void igb_initialize_transmit_units(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl; INIT_DEBUGOUT("igb_initialize_transmit_units: begin"); tctl = txdctl = 0; /* Setup the Tx Descriptor Rings */ for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 bus_addr = txr->txdma.dma_paddr; E1000_WRITE_REG(hw, E1000_TDLEN(i), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); /* Setup the HW Tx Head and Tail descriptor pointers */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(hw, E1000_TDBAL(i)), E1000_READ_REG(hw, E1000_TDLEN(i))); txr->queue_status = IGB_QUEUE_IDLE; txdctl |= IGB_TX_PTHRESH; txdctl |= IGB_TX_HTHRESH << 8; txdctl |= IGB_TX_WTHRESH << 16; txdctl |= E1000_TXDCTL_QUEUE_ENABLE; E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); } if (adapter->vf_ifp) return; e1000_config_collision_dist(hw); /* Program the Transmit Control Register */ tctl = E1000_READ_REG(hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(hw, E1000_TCTL, tctl); } /********************************************************************* * * Free all transmit rings. 
* **********************************************************************/ static void igb_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); igb_free_transmit_buffers(txr); igb_dma_free(adapter, &txr->txdma); IGB_TX_UNLOCK(txr); IGB_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ static void igb_free_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct igb_tx_buf *tx_buffer; int i; INIT_DEBUGOUT("free_transmit_ring: begin"); if (txr->tx_buffers == NULL) return; tx_buffer = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; if (tx_buffer->map != NULL) { bus_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } else if (tx_buffer->map != NULL) { bus_dmamap_unload(txr->txtag, tx_buffer->map); bus_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } #ifndef IGB_LEGACY_TX if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif if (txr->tx_buffers != NULL) { free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } if (txr->txtag != NULL) { bus_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) on * adapters using advanced tx descriptors * **********************************************************************/ static int igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, u32 *olinfo_status) { struct adapter *adapter = txr->adapter; struct e1000_adv_tx_context_desc *TXD; u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; u32 mss_l4len_idx = 0, paylen; u16 vtag = 0, eh_type; int ctxd, ehdrlen, ip_hlen, tcp_hlen; struct ether_vlan_header *eh; #ifdef INET6 struct ip6_hdr *ip6; #endif #ifdef INET struct ip *ip; #endif struct tcphdr *th; /* * Determine where frame payload starts. * Jump over vlan headers if already present */ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; eh_type = eh->evl_proto; } else { ehdrlen = ETHER_HDR_LEN; eh_type = eh->evl_encap_proto; } switch (ntohs(eh_type)) { #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); /* XXX-BZ For now we do not pretend to support ext. hdrs. */ if (ip6->ip6_nxt != IPPROTO_TCP) return (ENXIO); ip_hlen = sizeof(struct ip6_hdr); ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; #endif #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); if (ip->ip_p != IPPROTO_TCP) return (ENXIO); ip->ip_sum = 0; ip_hlen = ip->ip_hl << 2; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; /* Tell transmit desc to also do IPv4 checksum. 
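 * (IXSM requests IPv4 header checksum insertion in addition to the TCP
 * checksum requested via TXSM later in this function.)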
*/ *olinfo_status |= E1000_TXD_POPTS_IXSM << 8; break; #endif default: panic("%s: CSUM_TSO but no supported IP version (0x%04x)", __func__, ntohs(eh_type)); break; } ctxd = txr->next_avail_desc; TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; tcp_hlen = th->th_off << 2; /* This is used in the transmit desc in encap */ paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen; /* VLAN MACLEN IPLEN */ if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); } vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= ip_hlen; TXD->vlan_macip_lens = htole32(vlan_macip_lens); /* ADV DTYPE TUCMD */ type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); /* MSS L4LEN IDX */ mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT); /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx |= txr->me << 4; TXD->mss_l4len_idx = htole32(mss_l4len_idx); TXD->seqnum_seed = htole32(0); if (++ctxd == txr->num_desc) ctxd = 0; txr->tx_avail--; txr->next_avail_desc = ctxd; *cmd_type_len |= E1000_ADVTXD_DCMD_TSE; *olinfo_status |= E1000_TXD_POPTS_TXSM << 8; *olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT; ++txr->tso_tx; return (0); } /********************************************************************* * * Advanced Context Descriptor setup for VLAN, CSUM or TSO * **********************************************************************/ static int igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, u32 *olinfo_status) { struct e1000_adv_tx_context_desc *TXD; struct adapter *adapter = txr->adapter; struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0, mss_l4len_idx = 0; int ehdrlen, ip_hlen = 0; u16 etype; u8 ipproto = 0; int offload = TRUE; int ctxd = txr->next_avail_desc; u16 vtag = 0; /* First check if TSO is to be used */ if (mp->m_pkthdr.csum_flags & CSUM_TSO) return (igb_tso_setup(txr, mp, cmd_type_len, olinfo_status)); if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) offload = FALSE; /* Indicate the whole packet as payload when not doing TSO */ *olinfo_status |= mp->m_pkthdr.len << E1000_ADVTXD_PAYLEN_SHIFT; /* Now ready a context descriptor */ TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; /* ** In advanced descriptors the vlan tag must ** be placed into the context descriptor. Hence ** we need to make one even if not doing offloads. */ if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); } else if (offload == FALSE) /* ... no offload to do */ return (0); /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
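 * With an 802.1Q tag already present, ehdrlen becomes ETHER_HDR_LEN +
 * ETHER_VLAN_ENCAP_LEN (14 + 4 = 18 bytes); otherwise it is the plain
 * 14-byte Ethernet header.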
*/ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } /* Set the ether header length */ vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = ip->ip_hl << 2; ipproto = ip->ip_p; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); /* XXX-BZ this will go badly in case of ext hdrs. */ ipproto = ip6->ip6_nxt; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; default: offload = FALSE; break; } vlan_macip_lens |= ip_hlen; type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; switch (ipproto) { case IPPROTO_TCP: if (mp->m_pkthdr.csum_flags & CSUM_TCP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; break; case IPPROTO_UDP: if (mp->m_pkthdr.csum_flags & CSUM_UDP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; break; #if __FreeBSD_version >= 800000 case IPPROTO_SCTP: if (mp->m_pkthdr.csum_flags & CSUM_SCTP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP; break; #endif default: offload = FALSE; break; } if (offload) /* For the TX descriptor setup */ *olinfo_status |= E1000_TXD_POPTS_TXSM << 8; /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx = txr->me << 4; /* Now copy bits into descriptor */ TXD->vlan_macip_lens = htole32(vlan_macip_lens); TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); TXD->seqnum_seed = htole32(0); TXD->mss_l4len_idx = htole32(mss_l4len_idx); /* We've consumed the first desc, adjust counters */ if (++ctxd == txr->num_desc) ctxd = 0; txr->next_avail_desc = ctxd; --txr->tx_avail; return (0); } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * * TRUE return means there's work in the ring to clean, FALSE its empty. **********************************************************************/ static bool igb_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; #ifdef DEV_NETMAP struct ifnet *ifp = adapter->ifp; #endif /* DEV_NETMAP */ u32 work, processed = 0; int limit = adapter->tx_process_limit; struct igb_tx_buf *buf; union e1000_adv_tx_desc *txd; mtx_assert(&txr->tx_mtx, MA_OWNED); #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, txr->me)) return (FALSE); #endif /* DEV_NETMAP */ if (txr->tx_avail == txr->num_desc) { txr->queue_status = IGB_QUEUE_IDLE; return FALSE; } /* Get work starting point */ work = txr->next_to_clean; buf = &txr->tx_buffers[work]; txd = &txr->tx_base[work]; work -= txr->num_desc; /* The distance to ring end */ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); do { union e1000_adv_tx_desc *eop = buf->eop; if (eop == NULL) /* No work */ break; if ((eop->wb.status & E1000_TXD_STAT_DD) == 0) break; /* I/O not complete */ if (buf->m_head) { txr->bytes += buf->m_head->m_pkthdr.len; bus_dmamap_sync(txr->txtag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } buf->eop = NULL; ++txr->tx_avail; /* We clean the range if multi segment */ while (txd != eop) { ++txd; ++buf; ++work; /* wrap the ring? 
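 * (work was biased by -num_desc above, so it reaches zero exactly at the
 * physical end of the ring; rewinding it by num_desc and resetting the
 * buf/txd pointers restarts the scan at slot 0.)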
*/ if (__predict_false(!work)) { work -= txr->num_desc; buf = txr->tx_buffers; txd = txr->tx_base; } if (buf->m_head) { txr->bytes += buf->m_head->m_pkthdr.len; bus_dmamap_sync(txr->txtag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } ++txr->tx_avail; buf->eop = NULL; } ++txr->packets; ++processed; txr->watchdog_time = ticks; /* Try the next packet */ ++txd; ++buf; ++work; /* reset with a wrap */ if (__predict_false(!work)) { work -= txr->num_desc; buf = txr->tx_buffers; txd = txr->tx_base; } prefetch(txd); } while (__predict_true(--limit)); bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); work += txr->num_desc; txr->next_to_clean = work; /* ** Watchdog calculation, we know there's ** work outstanding or the first return ** would have been taken, so none processed ** for too long indicates a hang. */ if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG)) txr->queue_status |= IGB_QUEUE_HUNG; if (txr->tx_avail >= IGB_QUEUE_THRESHOLD) txr->queue_status &= ~IGB_QUEUE_DEPLETED; if (txr->tx_avail == txr->num_desc) { txr->queue_status = IGB_QUEUE_IDLE; return (FALSE); } return (TRUE); } /********************************************************************* * * Refresh mbuf buffers for RX descriptor rings * - now keeps its own state so discards due to resource * exhaustion are unnecessary, if an mbuf cannot be obtained * it just returns, keeping its placeholder, thus it can simply * be recalled to try again. * **********************************************************************/ static void igb_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; struct igb_rx_buf *rxbuf; struct mbuf *mh, *mp; int i, j, nsegs, error; bool refreshed = FALSE; i = j = rxr->next_to_refresh; /* ** Get one descriptor beyond ** our work mark to control ** the loop. 
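** j always runs one slot ahead of i; refreshing stops when j reaches the
** caller-supplied limit, so next_to_refresh (i) is left on the last
** descriptor that actually received a fresh mbuf.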
*/ if (++j == adapter->num_rx_desc) j = 0; while (j != limit) { rxbuf = &rxr->rx_buffers[i]; /* No hdr mbuf used with header split off */ if (rxr->hdr_split == FALSE) goto no_split; if (rxbuf->m_head == NULL) { mh = m_gethdr(M_NOWAIT, MT_DATA); if (mh == NULL) goto update; } else mh = rxbuf->m_head; mh->m_pkthdr.len = mh->m_len = MHLEN; mh->m_len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(mh); rxbuf->m_head = NULL; goto update; } rxbuf->m_head = mh; bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr); no_split: if (rxbuf->m_pack == NULL) { mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (mp == NULL) goto update; } else mp = rxbuf->m_pack; mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: payload dmamap load" " failure - %d\n", error); m_free(mp); rxbuf->m_pack = NULL; goto update; } rxbuf->m_pack = mp; bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.pkt_addr = htole64(pseg[0].ds_addr); refreshed = TRUE; /* I feel wefreshed :) */ i = j; /* our next is precalculated */ rxr->next_to_refresh = i; if (++j == adapter->num_rx_desc) j = 0; } update: if (refreshed) /* update tail */ E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->next_to_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/ static int igb_allocate_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; device_t dev = adapter->dev; struct igb_rx_buf *rxbuf; int i, bsize, error; bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc; if (!(rxr->rx_buffers = (struct igb_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; goto fail; } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSIZE, /* maxsize */ 1, /* nsegments */ MSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->htag))) { device_printf(dev, "Unable to create RX DMA tag\n"); goto fail; } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->ptag))) { device_printf(dev, "Unable to create RX payload DMA tag\n"); goto fail; } for (i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; error = bus_dmamap_create(rxr->htag, 0, &rxbuf->hmap); if (error) { device_printf(dev, "Unable to create RX head DMA maps\n"); goto fail; } error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); if (error) { device_printf(dev, "Unable to create RX packet DMA maps\n"); goto fail; } } return (0); fail: /* Frees all, but can handle partial completion */ igb_free_receive_structures(adapter); return (error); } static void igb_free_receive_ring(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct igb_rx_buf *rxbuf; for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } rxbuf->m_head = NULL; rxbuf->m_pack = NULL; } } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int igb_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter; struct ifnet *ifp; device_t dev; struct igb_rx_buf *rxbuf; bus_dma_segment_t pseg[1], hseg[1]; struct lro_ctrl *lro = &rxr->lro; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(rxr->adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ adapter = rxr->adapter; dev = adapter->dev; ifp = adapter->ifp; /* Clear the ring contents */ IGB_RX_LOCK(rxr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_RX, rxr->me, 0); #endif /* DEV_NETMAP */ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); /* ** Free current RX buffer structures and their mbufs */ igb_free_receive_ring(rxr); /* Configure for header split? 
*/ if (igb_header_split) rxr->hdr_split = TRUE; /* Now replenish the ring mbufs */ for (int j = 0; j < adapter->num_rx_desc; ++j) { struct mbuf *mh, *mp; rxbuf = &rxr->rx_buffers[j]; #ifdef DEV_NETMAP if (slot) { /* slot sj is mapped to the j-th NIC-ring entry */ int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j); uint64_t paddr; void *addr; addr = PNMB(na, slot + sj, &paddr); netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); /* Update descriptor */ rxr->rx_base[j].read.pkt_addr = htole64(paddr); continue; } #endif /* DEV_NETMAP */ if (rxr->hdr_split == FALSE) goto skip_head; /* First the header */ rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA); if (rxbuf->m_head == NULL) { error = ENOBUFS; goto fail; } m_adj(rxbuf->m_head, ETHER_ALIGN); mh = rxbuf->m_head; mh->m_len = mh->m_pkthdr.len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, rxbuf->hmap, rxbuf->m_head, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) /* Nothing elegant to do here */ goto fail; bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr); skip_head: /* Now the payload cluster */ rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (rxbuf->m_pack == NULL) { error = ENOBUFS; goto fail; } mp = rxbuf->m_pack; mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) goto fail; bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr); } /* Setup our descriptor indices */ rxr->next_to_check = 0; rxr->next_to_refresh = adapter->num_rx_desc - 1; rxr->lro_enabled = FALSE; rxr->rx_split_packets = 0; rxr->rx_bytes = 0; rxr->fmp = NULL; rxr->lmp = NULL; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* ** Now set up the LRO interface, we ** also only do head split when LRO ** is enabled, since so often they ** are undesireable in similar setups. */ if (ifp->if_capenable & IFCAP_LRO) { error = tcp_lro_init(lro); if (error) { device_printf(dev, "LRO Initialization failed!\n"); goto fail; } INIT_DEBUGOUT("RX LRO Initialized\n"); rxr->lro_enabled = TRUE; lro->ifp = adapter->ifp; } IGB_RX_UNLOCK(rxr); return (0); fail: igb_free_receive_ring(rxr); IGB_RX_UNLOCK(rxr); return (error); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int igb_setup_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; int i; for (i = 0; i < adapter->num_queues; i++, rxr++) if (igb_setup_receive_ring(rxr)) goto fail; return (0); fail: /* * Free RX buffers allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'i' is the endpoint. */ for (int j = 0; j < i; ++j) { rxr = &adapter->rx_rings[j]; IGB_RX_LOCK(rxr); igb_free_receive_ring(rxr); IGB_RX_UNLOCK(rxr); } return (ENOBUFS); } /* * Initialise the RSS mapping for NICs that support multiple transmit/ * receive rings. */ static void igb_initialise_rss_mapping(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; int i; int queue_id; u32 reta; u32 rss_key[10], mrqc, shift = 0; /* XXX? 
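 * The shift below appears to account for the 82575's RETA layout, where
 * each table entry carries the queue index in its upper bits
 * (queue << 6); later MACs take the queue index directly, so shift
 * stays 0.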
*/ if (adapter->hw.mac.type == e1000_82575) shift = 6; /* * The redirection table controls which destination * queue each bucket redirects traffic to. * Each DWORD represents four queues, with the LSB * being the first queue in the DWORD. * * This just allocates buckets to queues using round-robin * allocation. * * NOTE: It Just Happens to line up with the default * RSS allocation method. */ /* Warning FM follows */ reta = 0; for (i = 0; i < 128; i++) { #ifdef RSS queue_id = rss_get_indirection_to_bucket(i); /* * If we have more queues than buckets, we'll * end up mapping buckets to a subset of the * queues. * * If we have more buckets than queues, we'll * end up instead assigning multiple buckets * to queues. * * Both are suboptimal, but we need to handle * the case so we don't go out of bounds * indexing arrays and such. */ queue_id = queue_id % adapter->num_queues; #else queue_id = (i % adapter->num_queues); #endif /* Adjust if required */ queue_id = queue_id << shift; /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta = reta >> 8; reta = reta | ( ((uint32_t) queue_id) << 24); if ((i & 3) == 3) { E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); reta = 0; } } /* Now fill in hash table */ /* * MRQC: Multiple Receive Queues Command * Set queuing to RSS control, number depends on the device. */ mrqc = E1000_MRQC_ENABLE_RSS_8Q; #ifdef RSS /* XXX ew typecasting */ rss_getkey((uint8_t *) &rss_key); #else arc4rand(&rss_key, sizeof(rss_key), 0); #endif for (i = 0; i < 10; i++) E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key[i]); /* * Configure the RSS fields to hash upon. */ mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP); mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP); mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP | E1000_MRQC_RSS_FIELD_IPV6_UDP); mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); E1000_WRITE_REG(hw, E1000_MRQC, mrqc); } /********************************************************************* * * Enable receive unit. * **********************************************************************/ static void igb_initialize_receive_units(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 rctl, rxcsum, psize, srrctl = 0; INIT_DEBUGOUT("igb_initialize_receive_unit: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* ** Set up for header split */ if (igb_header_split) { /* Use a standard mbuf for the header */ srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } else srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; /* ** Set up for jumbo frames */ if (ifp->if_mtu > ETHERMTU) { rctl |= E1000_RCTL_LPE; if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; } /* Set maximum packet len */ psize = adapter->max_frame_size; /* are we on a vlan? 
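 * (If a VLAN is configured on top of this interface, VLAN_TAG_SIZE adds
 * 4 bytes of headroom for the 802.1Q tag when programming RLPML.)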
*/ if (adapter->ifp->if_vlantrunk != NULL) psize += VLAN_TAG_SIZE; E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); } else { rctl &= ~E1000_RCTL_LPE; srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_2048; } /* * If TX flow control is disabled and there's >1 queue defined, * enable DROP. * * This drops frames rather than hanging the RX MAC for all queues. */ if ((adapter->num_queues > 1) && (adapter->fc == e1000_fc_none || adapter->fc == e1000_fc_rx_pause)) { srrctl |= E1000_SRRCTL_DROP_EN; } /* Setup the Base and Length of the Rx Descriptor Rings */ for (int i = 0; i < adapter->num_queues; i++, rxr++) { u64 bus_addr = rxr->rxdma.dma_paddr; u32 rxdctl; E1000_WRITE_REG(hw, E1000_RDLEN(i), adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); /* Enable this Queue */ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGB_RX_PTHRESH; rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } /* ** Setup for RX MultiQueue */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); if (adapter->num_queues >1) { /* rss setup */ igb_initialise_rss_mapping(adapter); /* ** NOTE: Receive Full-Packet Checksum Offload ** is mutually exclusive with Multiqueue. However ** this is not the same as TCP/IP checksums which ** still work. */ rxcsum |= E1000_RXCSUM_PCSD; #if __FreeBSD_version >= 800000 /* For SCTP Offload */ if (((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580)) && (ifp->if_capenable & IFCAP_RXCSUM)) rxcsum |= E1000_RXCSUM_CRCOFL; #endif } else { /* Non RSS setup */ if (ifp->if_capenable & IFCAP_RXCSUM) { rxcsum |= E1000_RXCSUM_IPPCSE; #if __FreeBSD_version >= 800000 if ((adapter->hw.mac.type == e1000_82576) || (adapter->hw.mac.type == e1000_82580)) rxcsum |= E1000_RXCSUM_CRCOFL; #endif } else rxcsum &= ~E1000_RXCSUM_TUOFL; } E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Strip CRC bytes. */ rctl |= E1000_RCTL_SECRC; /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; /* Don't store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Receives */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); /* * Setup the HW Rx Head and Tail Descriptor Pointers * - needs to be after enable */ for (int i = 0; i < adapter->num_queues; i++) { rxr = &adapter->rx_rings[i]; E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); #ifdef DEV_NETMAP /* * an init() while a netmap client is active must * preserve the rx buffers passed to userspace. * In this driver it means we adjust RDT to * something different from next_to_refresh * (which is not used in netmap mode). */ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->rx_rings[i]; int t = rxr->next_to_refresh - nm_kr_rxspace(kring); if (t >= adapter->num_rx_desc) t -= adapter->num_rx_desc; else if (t < 0) t += adapter->num_rx_desc; E1000_WRITE_REG(hw, E1000_RDT(i), t); } else #endif /* DEV_NETMAP */ E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh); } return; } /********************************************************************* * * Free receive rings. 
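 * For each queue this releases the LRO state, the receive buffers and
 * their dma maps, and the descriptor ring dma memory; the rx_rings
 * array itself is freed last.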
* **********************************************************************/ static void igb_free_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { struct lro_ctrl *lro = &rxr->lro; igb_free_receive_buffers(rxr); tcp_lro_free(lro); igb_dma_free(adapter, &rxr->rxdma); } free(adapter->rx_rings, M_DEVBUF); } /********************************************************************* * * Free receive ring data structures. * **********************************************************************/ static void igb_free_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct igb_rx_buf *rxbuf; int i; INIT_DEBUGOUT("free_receive_structures: begin"); /* Cleanup any existing buffers */ if (rxr->rx_buffers != NULL) { for (i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } rxbuf->m_head = NULL; rxbuf->m_pack = NULL; if (rxbuf->hmap != NULL) { bus_dmamap_destroy(rxr->htag, rxbuf->hmap); rxbuf->hmap = NULL; } if (rxbuf->pmap != NULL) { bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); rxbuf->pmap = NULL; } } if (rxr->rx_buffers != NULL) { free(rxr->rx_buffers, M_DEVBUF); rxr->rx_buffers = NULL; } } if (rxr->htag != NULL) { bus_dma_tag_destroy(rxr->htag); rxr->htag = NULL; } if (rxr->ptag != NULL) { bus_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; } } static __inline void igb_rx_discard(struct rx_ring *rxr, int i) { struct igb_rx_buf *rbuf; rbuf = &rxr->rx_buffers[i]; /* Partially received? Free the chain */ if (rxr->fmp != NULL) { rxr->fmp->m_flags |= M_PKTHDR; m_freem(rxr->fmp); rxr->fmp = NULL; rxr->lmp = NULL; } /* ** With advanced descriptors the writeback ** clobbers the buffer addrs, so its easier ** to just free the existing mbufs and take ** the normal refresh path to get new buffers ** and mapping. */ if (rbuf->m_head) { m_free(rbuf->m_head); rbuf->m_head = NULL; bus_dmamap_unload(rxr->htag, rbuf->hmap); } if (rbuf->m_pack) { m_free(rbuf->m_pack); rbuf->m_pack = NULL; bus_dmamap_unload(rxr->ptag, rbuf->pmap); } return; } static __inline void igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) { /* * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet * should be computed by hardware. Also it should not have VLAN tag in * ethernet header. */ if (rxr->lro_enabled && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) == (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * Send to the stack if: ** - LRO not enabled, or ** - no LRO resources, or ** - lro enqueue fails */ if (rxr->lro.lro_cnt != 0) if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } IGB_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, m); IGB_RX_LOCK(rxr); } /********************************************************************* * * This routine executes in interrupt context. 
It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * * Return TRUE if more to clean, FALSE otherwise *********************************************************************/ static bool igb_rxeof(struct igb_queue *que, int count, int *done) { struct adapter *adapter = que->adapter; struct rx_ring *rxr = que->rxr; struct ifnet *ifp = adapter->ifp; struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; int i, processed = 0, rxdone = 0; u32 ptype, staterr = 0; union e1000_adv_rx_desc *cur; IGB_RX_LOCK(rxr); /* Sync the ring. */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, rxr->me, &processed)) { IGB_RX_UNLOCK(rxr); return (FALSE); } #endif /* DEV_NETMAP */ /* Main clean loop */ for (i = rxr->next_to_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; struct igb_rx_buf *rxbuf; u16 hlen, plen, hdr, vtag, pkt_info; bool eop = FALSE; cur = &rxr->rx_base[i]; staterr = le32toh(cur->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; count--; sendmp = mh = mp = NULL; cur->wb.upper.status_error = 0; rxbuf = &rxr->rx_buffers[i]; plen = le16toh(cur->wb.upper.length); ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK; if (((adapter->hw.mac.type == e1000_i350) || (adapter->hw.mac.type == e1000_i354)) && (staterr & E1000_RXDEXT_STATERR_LB)) vtag = be16toh(cur->wb.upper.vlan); else vtag = le16toh(cur->wb.upper.vlan); hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP); /* * Free the frame (all segments) if we're at EOP and * it's an error. * * The datasheet states that EOP + status is only valid for * the final segment in a multi-segment frame. */ if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) { adapter->dropped_pkts++; ++rxr->rx_discarded; igb_rx_discard(rxr, i); goto next_desc; } /* ** The way the hardware is configured to ** split, it will ONLY use the header buffer ** when header split is enabled, otherwise we ** get normal behavior, ie, both header and ** payload are DMA'd into the payload buffer. ** ** The fmp test is to catch the case where a ** packet spans multiple descriptors, in that ** case only the first header is valid. */ if (rxr->hdr_split && rxr->fmp == NULL) { bus_dmamap_unload(rxr->htag, rxbuf->hmap); hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > IGB_HDR_BUF) hlen = IGB_HDR_BUF; mh = rxr->rx_buffers[i].m_head; mh->m_len = hlen; /* clear buf pointer for refresh */ rxbuf->m_head = NULL; /* ** Get the payload length, this ** could be zero if its a small ** packet. */ if (plen > 0) { mp = rxr->rx_buffers[i].m_pack; mp->m_len = plen; mh->m_next = mp; /* clear buf pointer */ rxbuf->m_pack = NULL; rxr->rx_split_packets++; } } else { /* ** Either no header split, or a ** secondary piece of a fragmented ** split packet. 
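** In that case the whole segment was dma'd into the payload cluster,
** so hand up m_pack with the full descriptor length and leave the
** header buffer alone.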
*/ mh = rxr->rx_buffers[i].m_pack; mh->m_len = plen; /* clear buf info for refresh */ rxbuf->m_pack = NULL; } bus_dmamap_unload(rxr->ptag, rxbuf->pmap); ++processed; /* So we know when to refresh */ /* Initial frame - setup */ if (rxr->fmp == NULL) { mh->m_pkthdr.len = mh->m_len; /* Save the head of the chain */ rxr->fmp = mh; rxr->lmp = mh; if (mp != NULL) { /* Add payload if split */ mh->m_pkthdr.len += mp->m_len; rxr->lmp = mh->m_next; } } else { /* Chain mbuf's together */ rxr->lmp->m_next = mh; rxr->lmp = rxr->lmp->m_next; rxr->fmp->m_pkthdr.len += mh->m_len; } if (eop) { rxr->fmp->m_pkthdr.rcvif = ifp; rxr->rx_packets++; /* capture data for AIM */ rxr->packets++; rxr->bytes += rxr->fmp->m_pkthdr.len; rxr->rx_bytes += rxr->fmp->m_pkthdr.len; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) igb_rx_checksum(staterr, rxr->fmp, ptype); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (staterr & E1000_RXD_STAT_VP) != 0) { rxr->fmp->m_pkthdr.ether_vtag = vtag; rxr->fmp->m_flags |= M_VLANTAG; } /* * In case of multiqueue, we have RXCSUM.PCSD bit set * and never cleared. This means we have RSS hash * available to be used. */ if (adapter->num_queues > 1) { rxr->fmp->m_pkthdr.flowid = le32toh(cur->wb.lower.hi_dword.rss); switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) { case E1000_RXDADV_RSSTYPE_IPV4_TCP: M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV4); break; case E1000_RXDADV_RSSTYPE_IPV4: M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_IPV4); break; case E1000_RXDADV_RSSTYPE_IPV6_TCP: M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV6); break; case E1000_RXDADV_RSSTYPE_IPV6_EX: M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_IPV6_EX); break; case E1000_RXDADV_RSSTYPE_IPV6: M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_IPV6); break; case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX: M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV6_EX); break; default: /* XXX fallthrough */ M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_OPAQUE); } } else { #ifndef IGB_LEGACY_TX rxr->fmp->m_pkthdr.flowid = que->msix; M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_OPAQUE); #endif } sendmp = rxr->fmp; /* Make sure to set M_PKTHDR. */ sendmp->m_flags |= M_PKTHDR; rxr->fmp = NULL; rxr->lmp = NULL; } next_desc: bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Advance our pointers to the next descriptor. */ if (++i == adapter->num_rx_desc) i = 0; /* ** Send to the stack or LRO */ if (sendmp != NULL) { rxr->next_to_check = i; igb_rx_input(rxr, ifp, sendmp, ptype); i = rxr->next_to_check; rxdone++; } /* Every 8 descriptors we go to refresh mbufs */ if (processed == 8) { igb_refresh_mbufs(rxr, i); processed = 0; } } /* Catch any remainders */ if (igb_rx_unrefreshed(rxr)) igb_refresh_mbufs(rxr, i); rxr->next_to_check = i; /* * Flush any outstanding LRO work */ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } if (done != NULL) *done += rxdone; IGB_RX_UNLOCK(rxr); return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. 
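 * Roughly: a good IP header checksum sets CSUM_IP_CHECKED|CSUM_IP_VALID,
 * a good TCP/UDP checksum sets CSUM_DATA_VALID|CSUM_PSEUDO_HDR with
 * csum_data 0xffff, and SCTP frames (where supported) get CSUM_SCTP_VALID
 * instead.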
* *********************************************************************/ static void igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype) { u16 status = (u16)staterr; u8 errors = (u8) (staterr >> 24); int sctp; /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) { mp->m_pkthdr.csum_flags = 0; return; } if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0) sctp = 1; else sctp = 0; if (status & E1000_RXD_STAT_IPCS) { /* Did it pass? */ if (!(errors & E1000_RXD_ERR_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else mp->m_pkthdr.csum_flags = 0; } if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); #if __FreeBSD_version >= 800000 if (sctp) /* reassign */ type = CSUM_SCTP_VALID; #endif /* Did it pass? */ if (!(errors & E1000_RXD_ERR_TCPE)) { mp->m_pkthdr.csum_flags |= type; if (sctp == 0) mp->m_pkthdr.csum_data = htons(0xffff); } } return; } /* * This routine is run via an vlan * config EVENT */ static void igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct adapter *adapter = ifp->if_softc; u32 index, bit; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IGB_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; /* Change hw filter setting */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) igb_setup_vlan_hw_support(adapter); IGB_CORE_UNLOCK(adapter); } /* * This routine is run via an vlan * unconfig EVENT */ static void igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct adapter *adapter = ifp->if_softc; u32 index, bit; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IGB_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; /* Change hw filter setting */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) igb_setup_vlan_hw_support(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_setup_vlan_hw_support(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; u32 reg; if (adapter->vf_ifp) { e1000_rlpml_set_vf(hw, adapter->max_frame_size + VLAN_TAG_SIZE); return; } reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_VME; E1000_WRITE_REG(hw, E1000_CTRL, reg); /* Enable the Filter Table */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { reg = E1000_READ_REG(hw, E1000_RCTL); reg &= ~E1000_RCTL_CFIEN; reg |= E1000_RCTL_VFE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } /* Update the frame size */ E1000_WRITE_REG(&adapter->hw, E1000_RLPML, adapter->max_frame_size + VLAN_TAG_SIZE); /* Don't bother with table if no vlans */ if ((adapter->num_vlans == 0) || ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) return; /* ** A soft reset zero's out the VFTA, so ** we need to repopulate it now. 
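** Each of the IGB_VFTA_SIZE (128) shadow words covers 32 vlan ids,
** mirroring how register/unregister maintain it: index = (vtag >> 5) & 0x7F,
** bit = vtag & 0x1F, so vlan 100 for example lives at word 3, bit 4.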
*/ for (int i = 0; i < IGB_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) { if (adapter->vf_ifp) e1000_vfta_set_vf(hw, adapter->shadow_vfta[i], TRUE); else e1000_write_vfta(hw, i, adapter->shadow_vfta[i]); } } static void igb_enable_intr(struct adapter *adapter) { /* With RSS set up what to auto clear */ if (adapter->msix_mem) { u32 mask = (adapter->que_mask | adapter->link_mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask); E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); } else { E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK); } E1000_WRITE_FLUSH(&adapter->hw); return; } static void igb_disable_intr(struct adapter *adapter) { if (adapter->msix_mem) { E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0); } E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(&adapter->hw); return; } /* * Bit of a misnomer, what this really means is * to enable OS management of the system... aka * to disable special hardware management features */ static void igb_init_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; manc2h |= 1 << 5; /* Mng Port 623 */ manc2h |= 1 << 6; /* Mng Port 664 */ E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one. */ static void igb_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. * */ static void igb_get_hw_control(struct adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; /* Let firmware know the driver has taken over */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } /* * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that the * driver is no longer loaded. 
* */ static void igb_release_hw_control(struct adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; /* Let firmware taken over control of h/w */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } static int igb_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return (TRUE); } /* * Enable PCI Wake On Lan capability */ static void igb_enable_wakeup(device_t dev) { u16 cap, status; u8 id; /* First find the capabilities pointer*/ cap = pci_read_config(dev, PCIR_CAP_PTR, 2); /* Read the PM Capabilities */ id = pci_read_config(dev, cap, 1); if (id != PCIY_PMG) /* Something wrong */ return; /* OK, we have the power capabilities, so now get the status register */ cap += PCIR_POWER_STATUS; status = pci_read_config(dev, cap, 2); status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, cap, status, 2); return; } static void igb_led_func(void *arg, int onoff) { struct adapter *adapter = arg; IGB_CORE_LOCK(adapter); if (onoff) { e1000_setup_led(&adapter->hw); e1000_led_on(&adapter->hw); } else { e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } IGB_CORE_UNLOCK(adapter); } static uint64_t igb_get_vf_counter(if_t ifp, ift_counter cnt) { struct adapter *adapter; struct e1000_vf_stats *stats; #ifndef IGB_LEGACY_TX struct tx_ring *txr; uint64_t rv; #endif adapter = if_getsoftc(ifp); stats = (struct e1000_vf_stats *)adapter->stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (stats->gprc); case IFCOUNTER_OPACKETS: return (stats->gptc); case IFCOUNTER_IBYTES: return (stats->gorc); case IFCOUNTER_OBYTES: return (stats->gotc); case IFCOUNTER_IMCASTS: return (stats->mprc); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts); case IFCOUNTER_OERRORS: return (adapter->watchdog_events); #ifndef IGB_LEGACY_TX case IFCOUNTER_OQDROPS: rv = 0; txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) rv += txr->br->br_drops; return (rv); #endif default: return (if_get_counter_default(ifp, cnt)); } } static uint64_t igb_get_counter(if_t ifp, ift_counter cnt) { struct adapter *adapter; struct e1000_hw_stats *stats; #ifndef IGB_LEGACY_TX struct tx_ring *txr; uint64_t rv; #endif adapter = if_getsoftc(ifp); if (adapter->vf_ifp) return (igb_get_vf_counter(ifp, cnt)); stats = (struct e1000_hw_stats *)adapter->stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (stats->gprc); case IFCOUNTER_OPACKETS: return (stats->gptc); case IFCOUNTER_IBYTES: return (stats->gorc); case IFCOUNTER_OBYTES: return (stats->gotc); case IFCOUNTER_IMCASTS: return (stats->mprc); case IFCOUNTER_OMCASTS: return (stats->mptc); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + stats->rxerrc + stats->crcerrs + stats->algnerrc + stats->ruc + stats->roc + stats->cexterr); case IFCOUNTER_OERRORS: return (stats->ecol + stats->latecol + adapter->watchdog_events); case IFCOUNTER_COLLISIONS: return (stats->colc); case IFCOUNTER_IQDROPS: return (stats->mpc); #ifndef IGB_LEGACY_TX case IFCOUNTER_OQDROPS: rv = 0; txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) rv += txr->br->br_drops; return (rv); #endif default: return (if_get_counter_default(ifp, cnt)); } } /********************************************************************** * * Update the board statistics counters. 
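 * The hardware statistics registers are clear-on-read, so each call here
 * accumulates the deltas since the last run into the soft copies kept in
 * adapter->stats.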
* **********************************************************************/ static void igb_update_stats_counters(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_hw_stats *stats; /* ** The virtual function adapter has only a ** small controlled set of stats, do only ** those and return. */ if (adapter->vf_ifp) { igb_update_vf_stats_counters(adapter); return; } stats = (struct e1000_hw_stats *)adapter->stats; if (adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS); stats->sec += E1000_READ_REG(hw, E1000_SEC); } stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); stats->mpc += E1000_READ_REG(hw, E1000_MPC); stats->scc += E1000_READ_REG(hw, E1000_SCC); stats->ecol += E1000_READ_REG(hw, E1000_ECOL); stats->mcc += E1000_READ_REG(hw, E1000_MCC); stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); stats->colc += E1000_READ_REG(hw, E1000_COLC); stats->dc += E1000_READ_REG(hw, E1000_DC); stats->rlec += E1000_READ_REG(hw, E1000_RLEC); stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); /* ** For watchdog management we need to know if we have been ** paused during the last interval, so capture that here. */ adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); stats->xoffrxc += adapter->pause_frames; stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); stats->gprc += E1000_READ_REG(hw, E1000_GPRC); stats->bprc += E1000_READ_REG(hw, E1000_BPRC); stats->mprc += E1000_READ_REG(hw, E1000_MPRC); stats->gptc += E1000_READ_REG(hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. 
*/ /* Both registers clear on the read of the high dword */ stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32); stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32); stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); stats->ruc += E1000_READ_REG(hw, E1000_RUC); stats->rfc += E1000_READ_REG(hw, E1000_RFC); stats->roc += E1000_READ_REG(hw, E1000_ROC); stats->rjc += E1000_READ_REG(hw, E1000_RJC); stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC); stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC); stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC); stats->tor += E1000_READ_REG(hw, E1000_TORL) + ((u64)E1000_READ_REG(hw, E1000_TORH) << 32); stats->tot += E1000_READ_REG(hw, E1000_TOTL) + ((u64)E1000_READ_REG(hw, E1000_TOTH) << 32); stats->tpr += E1000_READ_REG(hw, E1000_TPR); stats->tpt += E1000_READ_REG(hw, E1000_TPT); stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); stats->mptc += E1000_READ_REG(hw, E1000_MPTC); stats->bptc += E1000_READ_REG(hw, E1000_BPTC); /* Interrupt Counts */ stats->iac += E1000_READ_REG(hw, E1000_IAC); stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); /* Host to Card Statistics */ stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32)); stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); /* Driver specific counters */ adapter->device_control = E1000_READ_REG(hw, E1000_CTRL); adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL); adapter->int_mask = E1000_READ_REG(hw, E1000_IMS); adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS); adapter->packet_buf_alloc_tx = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); adapter->packet_buf_alloc_rx = (E1000_READ_REG(hw, E1000_PBA) & 0xffff); } /********************************************************************** * * Initialize the VF board statistics counters. 
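 * This just snapshots the current VF counter registers so that the deltas
 * taken later (see UPDATE_VF_REG) can cope with the counters wrapping at
 * 32 bits.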
* **********************************************************************/ static void igb_vf_init_stats(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_vf_stats *stats; stats = (struct e1000_vf_stats *)adapter->stats; if (stats == NULL) return; stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); } /********************************************************************** * * Update the VF board statistics counters. * **********************************************************************/ static void igb_update_vf_stats_counters(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_vf_stats *stats; if (adapter->link_speed == 0) return; stats = (struct e1000_vf_stats *)adapter->stats; UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); } /* Export a single 32-bit register via a read-only sysctl. */ static int igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* ** Tuneable interrupt rate handler */ static int igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) { struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1); int error; u32 reg, usec, rate; reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix)); usec = ((reg & 0x7FFC) >> 2); if (usec > 0) rate = 1000000 / usec; else rate = 0; error = sysctl_handle_int(oidp, &rate, 0, req); if (error || !req->newptr) return error; return 0; } /* * Add sysctl variables, one per statistic, to the system. 
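 * They hang off the device's sysctl tree: a few driver counters at the top
 * level, a queueN node per ring, and mac_stats, interrupts and host
 * sub-nodes.  So, assuming unit 0, something like
 *     sysctl dev.igb.0.mac_stats.good_pkts_recvd
 * should read back the good-packets counter registered below.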
*/ static void igb_add_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail", CTLFLAG_RD, &adapter->mbuf_defrag_failed, "Defragmenting mbuf chain failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", CTLFLAG_RD, &adapter->device_control, "Device Control Register"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", CTLFLAG_RD, &adapter->rx_control, "Receiver Control Register"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", CTLFLAG_RD, &adapter->int_mask, "Interrupt Mask"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", CTLFLAG_RD, &adapter->eint_mask, "Extended Interrupt Mask"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", CTLFLAG_RD, &adapter->packet_buf_alloc_tx, "Transmit Buffer Packet Allocation"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", CTLFLAG_RD, &adapter->packet_buf_alloc_rx, "Receive Buffer Packet Allocation"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { struct lro_ctrl *lro = &rxr->lro; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i], sizeof(&adapter->queues[i]), igb_sysctl_interrupt_rate_handler, "IU", "Interrupt Rate"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(txr->me), igb_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(txr->me), igb_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue Descriptors Unavailable"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &txr->total_packets, "Queue Packets Transmitted"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(rxr->me), igb_sysctl_reg_handler, "IU", "Receive Descriptor Head"); 
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(rxr->me), igb_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued", CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued"); SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed", CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "MAC Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); /* ** VF adapter has a very limited set of stats ** since its not managing the metal, so to speak. */ if (adapter->vf_ifp) { SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &stats->gprc, "Good Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &stats->gorc, "Good Octets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); return; } SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &stats->symerrs, "Symbol Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &stats->sec, "Sequence Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &stats->dc, "Defer Count"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &stats->mpc, "Missed Packets"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_length_errors", CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &stats->rnbc, "Receive No Buffers"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &stats->ruc, "Receive Undersize"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &stats->rjc, "Recevied Jabber"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &stats->rxerrc, "Receive Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &stats->crcerrs, "CRC errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &stats->algnerrc, "Alignment Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_no_crs", CTLFLAG_RD, &stats->tncrs, "Transmit with No CRS"); /* On 82575 these are collision 
counts */ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", CTLFLAG_RD, &stats->cexterr, "Collision/Carrier extension errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &stats->xonrxc, "XON Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &stats->xontxc, "XON Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &stats->xoffrxc, "XOFF Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &stats->xofftxc, "XOFF Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd", CTLFLAG_RD, &stats->fcruc, "Unsupported Flow Control Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd", CTLFLAG_RD, &stats->mgprc, "Management Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop", CTLFLAG_RD, &stats->mgpdc, "Management Packets Dropped"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd", CTLFLAG_RD, &stats->mgptc, "Management Packets Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &stats->tpr, "Total Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &stats->gprc, "Good Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &stats->prc64, "64 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &stats->gorc, "Good Octets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_recvd", CTLFLAG_RD, &stats->tor, "Total Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_txd", CTLFLAG_RD, &stats->tot, "Total Octets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, 
&stats->ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &stats->tsctc, "TSO Contexts Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", CTLFLAG_RD, &stats->tsctfc, "TSO Contexts Failed"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &stats->iac, "Interrupt Assertion Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", CTLFLAG_RD, &stats->icrxptc, "Interrupt Cause Rx Pkt Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", CTLFLAG_RD, &stats->icrxatc, "Interrupt Cause Rx Abs Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", CTLFLAG_RD, &stats->ictxptc, "Interrupt Cause Tx Pkt Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", CTLFLAG_RD, &stats->ictxatc, "Interrupt Cause Tx Abs Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", CTLFLAG_RD, &stats->ictxqec, "Interrupt Cause Tx Queue Empty Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", CTLFLAG_RD, &stats->ictxqmtc, "Interrupt Cause Tx Queue Min Thresh Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &stats->icrxdmtc, "Interrupt Cause Rx Desc Min Thresh Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun", CTLFLAG_RD, &stats->icrxoc, "Interrupt Cause Receiver Overrun Count"); /* Host to Card Stats */ host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", CTLFLAG_RD, NULL, "Host to Card Statistics"); host_list = SYSCTL_CHILDREN(host_node); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt", CTLFLAG_RD, &stats->cbtmpc, "Circuit Breaker Tx Packet Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard", CTLFLAG_RD, &stats->htdpmc, "Host Transmit Discarded Packets"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt", CTLFLAG_RD, &stats->rpthc, "Rx Packets To Host"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts", CTLFLAG_RD, &stats->cbrmpc, "Circuit Breaker Rx Packet Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop", CTLFLAG_RD, &stats->cbrdpc, "Circuit Breaker Rx Dropped Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt", CTLFLAG_RD, &stats->hgptc, "Host Good Packets Tx Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop", CTLFLAG_RD, &stats->htcbdpc, "Host Tx Circuit Breaker Dropped Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes", CTLFLAG_RD, &stats->hgorc, "Host Good Octets Received Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes", CTLFLAG_RD, &stats->hgotc, "Host Good Octets Transmit Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors", CTLFLAG_RD, &stats->lenerrs, "Length Errors"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt", CTLFLAG_RD, &stats->scvpc, "SerDes/SGMII Code Violation Pkt Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, 
"header_redir_missed", CTLFLAG_RD, &stats->hrmpc, "Header Redirection Missed Packet Count"); } /********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words, stuff that matters is in that extent. * **********************************************************************/ static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) { adapter = (struct adapter *)arg1; igb_print_nvm_info(adapter); } return (error); } static void igb_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* Its a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static void igb_set_sysctl_value(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLFLAG_RW, limit, value, description); } /* ** Set flow control using sysctl: ** Flow control values: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct adapter *adapter = (struct adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); /* XXX TODO: update DROP_EN on each RX queue if appropriate */ return (error); } /* ** Manage DMA Coalesce: ** Control values: ** 0/1 - off/on ** Legal timer values are: ** 250,500,1000-10000 in thousands */ static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error; error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (adapter->dmac) { case 0: /* Disabling */ break; case 1: /* Just enable and use default */ adapter->dmac = 1000; break; case 250: case 500: case 1000: case 2000: case 3000: case 4000: case 5000: case 6000: case 7000: case 8000: case 9000: case 10000: /* Legal values - allow */ break; default: /* Do nothing, illegal value */ adapter->dmac = 0; return (EINVAL); } /* Reinit the interface */ igb_init(adapter); return (error); } /* ** Manage Energy Efficient Ethernet: ** Control values: ** 0/1 - enabled/disabled */ static int igb_sysctl_eee(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error, value; value = adapter->hw.dev_spec._82575.eee_disable; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); IGB_CORE_LOCK(adapter); adapter->hw.dev_spec._82575.eee_disable = (value != 
0); igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); return (0); } Index: projects/release-pkg/sys/dev/e1000/if_igb.h =================================================================== --- projects/release-pkg/sys/dev/e1000/if_igb.h (revision 295925) +++ projects/release-pkg/sys/dev/e1000/if_igb.h (revision 295926) @@ -1,626 +1,626 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IF_IGB_H_ #define _IF_IGB_H_ #include #include #ifndef IGB_LEGACY_TX #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82575.h" /* Tunables */ /* * IGB_TXD: Maximum number of Transmit Descriptors * * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define IGB_MIN_TXD 256 #define IGB_DEFAULT_TXD 1024 #define IGB_MAX_TXD 4096 /* * IGB_RXD: Maximum number of Receive Descriptors * * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. 
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define IGB_MIN_RXD 256 #define IGB_DEFAULT_RXD 1024 #define IGB_MAX_RXD 4096 /* * IGB_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors. */ #define IGB_TIDV 64 /* * IGB_TADV - Transmit Absolute Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if IGB_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with IGB_TIDV, may improve traffic throughput in specific * network conditions. */ #define IGB_TADV 64 /* * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting IGB_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that IGB_RDTR is set to 0. */ #define IGB_RDTR 0 /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if IGB_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with IGB_RDTR, may improve traffic throughput in specific network * conditions. */ #define IGB_RADV 64 /* * This parameter controls the duration of transmit watchdog timer. */ #define IGB_WATCHDOG (10 * hz) /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. Cleaning earlier seems a win. */ #define IGB_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 2) /* * This parameter controls whether or not autonegotation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter control whether or not the driver will wait for * autonegotiation to complete. 
* 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define AUTO_ALL_MODES 0 /* PHY master/slave setting */ #define IGB_MASTER_SLAVE e1000_ms_hw_default /* Support AutoMediaDetect for Marvell M88 PHY in i354 */ #define IGB_MEDIA_RESET (1 << 0) /* * Micellaneous constants */ #define IGB_INTEL_VENDOR_ID 0x8086 #define IGB_JUMBO_PBA 0x00000028 #define IGB_DEFAULT_PBA 0x00000030 #define IGB_SMARTSPEED_DOWNSHIFT 3 #define IGB_SMARTSPEED_MAX 15 #define IGB_MAX_LOOP 10 #define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : \ ((hw->mac.type <= e1000_82576) ? 16 : 8)) #define IGB_RX_HTHRESH 8 #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ adapter->msix_mem) ? 1 : 4) #define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) #define IGB_TX_HTHRESH 1 #define IGB_TX_WTHRESH ((hw->mac.type != e1000_82575 && \ adapter->msix_mem) ? 1 : 16) #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 #define IGB_TX_BUFFER_SIZE ((uint32_t) 1514) #define IGB_FC_PAUSE_TIME 0x0680 #define IGB_EEPROM_APME 0x400; /* Queue minimum free for use */ #define IGB_QUEUE_THRESHOLD (adapter->num_tx_desc / 8) /* * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will * also optimize cache line size effect. H/W supports up to cache line size 128. */ #define IGB_DBA_ALIGN 128 #define SPEED_MODE_BIT (1<<21) /* On PCI-E MACs only */ /* PCI Config defines */ #define IGB_MSIX_BAR 3 /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) -#define IGB_MAX_SCATTER 64 +#define IGB_MAX_SCATTER 40 #define IGB_VFTA_SIZE 128 #define IGB_BR_SIZE 4096 /* ring buf size */ #define IGB_TSO_SIZE (65535 + sizeof(struct ether_vlan_header)) #define IGB_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define IGB_TXPBSIZE 20408 #define IGB_HDR_BUF 128 #define IGB_PKTTYPE_MASK 0x0000FFF0 #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coalesce Flush */ #define ETH_ZLEN 60 #define ETH_ADDR_LEN 6 /* Offload bits in mbuf flag */ #if __FreeBSD_version >= 800000 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #else #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) #endif /* Define the starting Interrupt rate per Queue */ #define IGB_INTS_PER_SEC 8000 #define IGB_DEFAULT_ITR ((1000000/IGB_INTS_PER_SEC) << 2) #define IGB_LINK_ITR 2000 #define I210_LINK_DELAY 1000 /* Precision Time Sync (IEEE 1588) defines */ #define ETHERTYPE_IEEE1588 0x88F7 #define PICOSECS_PER_TICK 20833 #define TSYNC_PORT 319 /* UDP port for the protocol */ /* * Bus dma allocation structure used by * e1000_dma_malloc and e1000_dma_free. 
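 * It keeps the tag, map, kernel virtual address (dma_vaddr) and bus address
 * (dma_paddr) of one allocation together, so a descriptor ring can be set up
 * and torn down through a single handle.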
*/ struct igb_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; /* ** Driver queue struct: this is the interrupt container ** for the associated tx and rx ring. */ struct igb_queue { struct adapter *adapter; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ u32 eitr_setting; struct resource *res; void *tag; struct tx_ring *txr; struct rx_ring *rxr; struct task que_task; struct taskqueue *tq; u64 irqs; }; /* * The transmit ring, one per queue */ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; u32 me; int watchdog_time; union e1000_adv_tx_desc *tx_base; struct igb_tx_buf *tx_buffers; struct igb_dma_alloc txdma; volatile u16 tx_avail; u16 next_avail_desc; u16 next_to_clean; u16 num_desc; enum { IGB_QUEUE_IDLE = 1, IGB_QUEUE_WORKING = 2, IGB_QUEUE_HUNG = 4, IGB_QUEUE_DEPLETED = 8, } queue_status; u32 txd_cmd; bus_dma_tag_t txtag; char mtx_name[16]; #ifndef IGB_LEGACY_TX struct buf_ring *br; struct task txq_task; #endif u32 bytes; /* used for AIM */ u32 packets; /* Soft Stats */ unsigned long tso_tx; unsigned long no_tx_map_avail; unsigned long no_tx_dma_setup; u64 no_desc_avail; u64 total_packets; }; /* * Receive ring: one per queue */ struct rx_ring { struct adapter *adapter; u32 me; struct igb_dma_alloc rxdma; union e1000_adv_rx_desc *rx_base; struct lro_ctrl lro; bool lro_enabled; bool hdr_split; struct mtx rx_mtx; char mtx_name[16]; u32 next_to_refresh; u32 next_to_check; struct igb_rx_buf *rx_buffers; bus_dma_tag_t htag; /* dma tag for rx head */ bus_dma_tag_t ptag; /* dma tag for rx packet */ /* * First/last mbuf pointers, for * collecting multisegment RX packets. */ struct mbuf *fmp; struct mbuf *lmp; u32 bytes; u32 packets; int rdt; int rdh; /* Soft stats */ u64 rx_split_packets; u64 rx_discarded; u64 rx_packets; u64 rx_bytes; }; struct adapter { struct ifnet *ifp; struct e1000_hw hw; struct e1000_osdep osdep; struct device *dev; struct cdev *led_dev; struct resource *pci_mem; struct resource *msix_mem; int memrid; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSIX */ void *tag; struct resource *res; struct ifmedia media; struct callout timer; int msix; int if_flags; int pause_frames; struct mtx core_mtx; eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; u16 num_queues; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. */ u32 shadow_vfta[IGB_VFTA_SIZE]; /* Info about the interface */ u32 optics; u32 fc; /* local flow ctrl setting */ int advertise; /* link speeds */ bool link_active; u16 max_frame_size; u16 num_segs; u16 link_speed; bool link_up; u32 linkvec; u16 link_duplex; u32 dmac; int link_mask; /* Flags */ u32 flags; /* Mbuf cluster size */ u32 rx_mbuf_sz; /* Support for pluggable optics */ bool sfp_probe; struct task link_task; /* Link tasklet */ struct task mod_task; /* SFP tasklet */ struct task msf_task; /* Multispeed Fiber */ struct taskqueue *tq; /* ** Queues: ** This is the irq holder, it has ** and RX/TX pair or rings associated ** with it. */ struct igb_queue *queues; /* * Transmit rings: * Allocated at run time, an array of rings. */ struct tx_ring *tx_rings; u32 num_tx_desc; /* * Receive rings: * Allocated at run time, an array of rings. 
*/ struct rx_ring *rx_rings; u64 que_mask; u32 num_rx_desc; /* Multicast array memory */ u8 *mta; /* Misc stats maintained by the driver */ unsigned long device_control; unsigned long dropped_pkts; unsigned long eint_mask; unsigned long int_mask; unsigned long link_irq; unsigned long mbuf_defrag_failed; unsigned long no_tx_dma_setup; unsigned long packet_buf_alloc_rx; unsigned long packet_buf_alloc_tx; unsigned long rx_control; unsigned long rx_overruns; unsigned long watchdog_events; /* Used in pf and vf */ void *stats; int enable_aim; int has_manage; int wol; int rx_process_limit; int tx_process_limit; u16 vf_ifp; /* a VF interface */ bool in_detach; /* Used only in igb_ioctl */ }; /* ****************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * * ******************************************************************************/ typedef struct _igb_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } igb_vendor_info_t; struct igb_tx_buf { union e1000_adv_tx_desc *eop; struct mbuf *m_head; bus_dmamap_t map; }; struct igb_rx_buf { struct mbuf *m_head; struct mbuf *m_pack; bus_dmamap_t hmap; /* bus_dma map for header */ bus_dmamap_t pmap; /* bus_dma map for packet */ }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 igb_rx_unrefreshed(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; if (rxr->next_to_check > rxr->next_to_refresh) return (rxr->next_to_check - rxr->next_to_refresh - 1); else return ((adapter->num_rx_desc + rxr->next_to_check) - rxr->next_to_refresh - 1); } #define IGB_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF) #define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define IGB_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define IGB_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED) #define UPDATE_VF_REG(reg, last, cur) \ { \ u32 new = E1000_READ_REG(hw, reg); \ if (new < last) \ cur += 0x100000000LL; \ last = new; \ cur &= 0xFFFFFFFF00000000LL; \ cur |= new; \ } #if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 static __inline int drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) { #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) return (1); #endif return (!buf_ring_empty(br)); } #endif #endif /* _IF_IGB_H_ */ Index: projects/release-pkg/sys/dev/e1000/if_lem.h =================================================================== --- projects/release-pkg/sys/dev/e1000/if_lem.h (revision 295925) +++ projects/release-pkg/sys/dev/e1000/if_lem.h (revision 295926) @@ -1,521 +1,519 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _LEM_H_DEFINED_ #define _LEM_H_DEFINED_ /* Tunables */ /* * EM_TXD: Maximum number of Transmit Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define EM_MIN_TXD 80 #define EM_MAX_TXD_82543 256 #define EM_MAX_TXD 4096 #define EM_DEFAULT_TXD EM_MAX_TXD_82543 /* * EM_RXD - Maximum number of receive Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define EM_MIN_RXD 80 #define EM_MAX_RXD_82543 256 #define EM_MAX_RXD 4096 #define EM_DEFAULT_RXD EM_MAX_RXD_82543 /* * EM_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors. 
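 * For example, the default of 64 adds roughly 65.5 (64 x 1.024) microseconds of delay before the transmit interrupt is generated.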
*/ #define EM_TIDV 64 /* * EM_TADV - Transmit Absolute Interrupt Delay Value * (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if EM_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with EM_TIDV, may improve traffic throughput in specific * network conditions. */ #define EM_TADV 64 /* * EM_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting EM_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that EM_RDTR is set to 0. */ #define EM_RDTR 0 /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if EM_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with EM_RDTR, may improve traffic throughput in specific network * conditions. */ #define EM_RADV 64 /* * This parameter controls the max duration of transmit watchdog. */ #define EM_WATCHDOG (10 * hz) /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. */ #define EM_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) #define EM_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) /* * This parameter controls whether or not autonegotiation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter controls whether or not the driver will wait for * autonegotiation to complete.
* 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define AUTO_ALL_MODES 0 /* PHY master/slave setting */ #define EM_MASTER_SLAVE e1000_ms_hw_default /* * Micellaneous constants */ #define EM_VENDOR_ID 0x8086 #define EM_FLASH 0x0014 #define EM_JUMBO_PBA 0x00000028 #define EM_DEFAULT_PBA 0x00000030 #define EM_SMARTSPEED_DOWNSHIFT 3 #define EM_SMARTSPEED_MAX 15 #define EM_MAX_LOOP 10 #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 #define EM_FC_PAUSE_TIME 0x0680 #define EM_EEPROM_APME 0x400; #define EM_82544_APME 0x0004; /* Code compatilbility between 6 and 7 */ #ifndef ETHER_BPF_MTAP #define ETHER_BPF_MTAP BPF_MTAP #endif /* * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will * also optimize cache line size effect. H/W supports up to cache line size 128. */ #define EM_DBA_ALIGN 128 #define SPEED_MODE_BIT (1<<21) /* On PCI-E MACs only */ /* PCI Config defines */ #define EM_BAR_TYPE(v) ((v) & EM_BAR_TYPE_MASK) #define EM_BAR_TYPE_MASK 0x00000001 #define EM_BAR_TYPE_MMEM 0x00000000 #define EM_BAR_TYPE_IO 0x00000001 #define EM_BAR_TYPE_FLASH 0x0014 #define EM_BAR_MEM_TYPE(v) ((v) & EM_BAR_MEM_TYPE_MASK) #define EM_BAR_MEM_TYPE_MASK 0x00000006 #define EM_BAR_MEM_TYPE_32BIT 0x00000000 #define EM_BAR_MEM_TYPE_64BIT 0x00000004 #define EM_MSIX_BAR 3 /* On 82575 */ #if __FreeBSD_version < 900000 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #endif /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) -#define EM_MAX_SCATTER 64 +#define EM_MAX_SCATTER 40 #define EM_VFTA_SIZE 128 -#define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header)) -#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define EM_MSIX_MASK 0x01F00000 /* For 82574 use */ #define ETH_ZLEN 60 #define ETH_ADDR_LEN 6 #define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */ /* * 82574 has a nonstandard address for EIAC * and since its only used in MSIX, and in * the em driver only 82574 uses MSIX we can * solve it just using this define. */ #define EM_EIAC 0x000DC /* Used in for 82547 10Mb Half workaround */ #define EM_PBA_BYTES_SHIFT 0xA #define EM_TX_HEAD_ADDR_SHIFT 7 #define EM_PBA_TX_MASK 0xFFFF0000 #define EM_FIFO_HDR 0x10 #define EM_82547_PKT_THRESH 0x3e0 /* Precision Time Sync (IEEE 1588) defines */ #define ETHERTYPE_IEEE1588 0x88F7 #define PICOSECS_PER_TICK 20833 #define TSYNC_PORT 319 /* UDP port for the protocol */ #ifdef NIC_PARAVIRT #define E1000_PARA_SUBDEV 0x1101 /* special id */ #define E1000_CSBAL 0x02830 /* csb phys. addr. low */ #define E1000_CSBAH 0x02834 /* csb phys. addr. 
hi */ #include #endif /* NIC_PARAVIRT */ /* * Bus dma allocation structure used by * e1000_dma_malloc and e1000_dma_free. */ struct em_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; struct adapter; struct em_int_delay_info { struct adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* Our adapter structure */ struct adapter { if_t ifp; struct e1000_hw hw; /* FreeBSD operating-system-specific structures. */ struct e1000_osdep osdep; struct device *dev; struct cdev *led_dev; struct resource *memory; struct resource *flash; struct resource *msix; struct resource *ioport; int io_rid; /* 82574 may use 3 int vectors */ struct resource *res[3]; void *tag[3]; int rid[3]; struct ifmedia media; struct callout timer; struct callout tx_fifo_timer; bool watchdog_check; int watchdog_time; int msi; int if_flags; int max_frame_size; int min_frame_size; struct mtx core_mtx; struct mtx tx_mtx; struct mtx rx_mtx; int em_insert_vlan_header; /* Task for FAST handling */ struct task link_task; struct task rxtx_task; struct task rx_task; struct task tx_task; struct taskqueue *tq; /* private task queue */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u32 num_vlans; /* Management and WOL features */ u32 wol; bool has_manage; bool has_amt; /* Multicast array memory */ u8 *mta; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. */ u32 shadow_vfta[EM_VFTA_SIZE]; /* Info about the interface */ uint8_t link_active; uint16_t link_speed; uint16_t link_duplex; uint32_t smartspeed; uint32_t fc_setting; struct em_int_delay_info tx_int_delay; struct em_int_delay_info tx_abs_int_delay; struct em_int_delay_info rx_int_delay; struct em_int_delay_info rx_abs_int_delay; struct em_int_delay_info tx_itr; /* * Transmit definitions * * We have an array of num_tx_desc descriptors (handled * by the controller) paired with an array of tx_buffers * (at tx_buffer_area). * The index of the next available descriptor is next_avail_tx_desc. * The number of remaining tx_desc is num_tx_desc_avail. */ struct em_dma_alloc txdma; /* bus_dma glue for tx desc */ struct e1000_tx_desc *tx_desc_base; uint32_t next_avail_tx_desc; uint32_t next_tx_to_clean; volatile uint16_t num_tx_desc_avail; uint16_t num_tx_desc; uint16_t last_hw_offload; uint32_t txd_cmd; struct em_buffer *tx_buffer_area; bus_dma_tag_t txtag; /* dma tag for tx */ uint32_t tx_tso; /* last tx was tso */ /* * Receive definitions * * we have an array of num_rx_desc rx_desc (handled by the * controller), and paired with an array of rx_buffers * (at rx_buffer_area). * The next pair to check on receive is at offset next_rx_desc_to_check */ struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */ struct e1000_rx_desc *rx_desc_base; uint32_t next_rx_desc_to_check; uint32_t rx_buffer_len; uint16_t num_rx_desc; int rx_process_limit; struct em_buffer *rx_buffer_area; bus_dma_tag_t rxtag; bus_dmamap_t rx_sparemap; /* * First/last mbuf pointers, for * collecting multisegment RX packets. 
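 * fmp points at the first mbuf of the frame currently being assembled and
 * lmp at the most recently appended one, so new fragments can be chained
 * without walking the whole list.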
*/ struct mbuf *fmp; struct mbuf *lmp; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long mbuf_cluster_failed; unsigned long mbuf_defrag_failed; unsigned long no_tx_desc_avail1; unsigned long no_tx_desc_avail2; unsigned long no_tx_dma_setup; unsigned long no_tx_map_avail; unsigned long watchdog_events; unsigned long rx_irq; unsigned long rx_overruns; unsigned long tx_irq; /* 82547 workaround */ uint32_t tx_fifo_size; uint32_t tx_fifo_head; uint32_t tx_fifo_head_addr; uint64_t tx_fifo_reset_cnt; uint64_t tx_fifo_wrk_cnt; uint32_t tx_head_addr; /* For 82544 PCIX Workaround */ boolean_t pcix_82544; boolean_t in_detach; #ifdef NIC_SEND_COMBINING /* 0 = idle; 1xxxx int-pending; 3xxxx int + d pending + tdt */ #define MIT_PENDING_INT 0x10000 /* pending interrupt */ #define MIT_PENDING_TDT 0x30000 /* both intr and tdt write are pending */ uint32_t shadow_tdt; uint32_t sc_enable; #endif /* NIC_SEND_COMBINING */ #ifdef BATCH_DISPATCH uint32_t batch_enable; #endif /* BATCH_DISPATCH */ #ifdef NIC_PARAVIRT struct em_dma_alloc csb_mem; /* phys address */ struct paravirt_csb *csb; /* virtual addr */ uint32_t rx_retries; /* optimize rx loop */ uint32_t tdt_csb_count;// XXX stat uint32_t tdt_reg_count;// XXX stat uint32_t tdt_int_count;// XXX stat uint32_t guest_need_kick_count;// XXX stat #endif /* NIC_PARAVIRT */ struct e1000_hw_stats stats; }; /* ****************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * * ******************************************************************************/ typedef struct _em_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } em_vendor_info_t; struct em_buffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; /* For 82544 PCIX Workaround */ typedef struct _ADDRESS_LENGTH_PAIR { uint64_t address; uint32_t length; } ADDRESS_LENGTH_PAIR, *PADDRESS_LENGTH_PAIR; typedef struct _DESCRIPTOR_PAIR { ADDRESS_LENGTH_PAIR descriptor[4]; uint32_t elements; } DESC_ARRAY, *PDESC_ARRAY; #define EM_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF) #define EM_TX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_mtx, _name, "EM TX Lock", MTX_DEF) #define EM_RX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->rx_mtx, _name, "EM RX Lock", MTX_DEF) #define EM_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define EM_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define EM_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define EM_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define EM_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define EM_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define EM_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define EM_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define EM_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #endif /* _LEM_H_DEFINED_ */ Index: projects/release-pkg/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c =================================================================== --- projects/release-pkg/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c (revision 295925) +++ 
projects/release-pkg/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c (revision 295926) @@ -1,2472 +1,2492 @@ /*- * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2004-2006 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_net_vsc.h" #include "hv_rndis.h" #include "hv_rndis_filter.h" /* Short for Hyper-V network interface */ #define NETVSC_DEVNAME "hn" /* * It looks like offset 0 of buf is reserved to hold the softc pointer. 
* The sc pointer evidently not needed, and is not presently populated. * The packet offset is where the netvsc_packet starts in the buffer. */ #define HV_NV_SC_PTR_OFFSET_IN_BUF 0 #define HV_NV_PACKET_OFFSET_IN_BUF 16 /* YYY should get it from the underlying channel */ #define HN_TX_DESC_CNT 512 #define HN_LROENT_CNT_DEF 128 #define HN_RNDIS_MSG_LEN \ (sizeof(rndis_msg) + \ RNDIS_VLAN_PPI_SIZE + \ RNDIS_TSO_PPI_SIZE + \ RNDIS_CSUM_PPI_SIZE) #define HN_RNDIS_MSG_BOUNDARY PAGE_SIZE #define HN_RNDIS_MSG_ALIGN CACHE_LINE_SIZE #define HN_TX_DATA_BOUNDARY PAGE_SIZE #define HN_TX_DATA_MAXSIZE IP_MAXPACKET #define HN_TX_DATA_SEGSIZE PAGE_SIZE #define HN_TX_DATA_SEGCNT_MAX \ (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS) #define HN_DIRECT_TX_SIZE_DEF 128 struct hn_txdesc { #ifndef HN_USE_TXDESC_BUFRING SLIST_ENTRY(hn_txdesc) link; #endif struct mbuf *m; struct hn_tx_ring *txr; int refs; uint32_t flags; /* HN_TXD_FLAG_ */ netvsc_packet netvsc_pkt; /* XXX to be removed */ bus_dmamap_t data_dmap; bus_addr_t rndis_msg_paddr; rndis_msg *rndis_msg; bus_dmamap_t rndis_msg_dmap; }; #define HN_TXD_FLAG_ONLIST 0x1 #define HN_TXD_FLAG_DMAMAP 0x2 /* * Only enable UDP checksum offloading when it is on 2012R2 or * later. UDP checksum offloading doesn't work on earlier * Windows releases. */ #define HN_CSUM_ASSIST_WIN8 (CSUM_IP | CSUM_TCP) #define HN_CSUM_ASSIST (CSUM_IP | CSUM_UDP | CSUM_TCP) #define HN_LRO_LENLIM_DEF (25 * ETHERMTU) /* YYY 2*MTU is a bit rough, but should be good enough. */ #define HN_LRO_LENLIM_MIN(ifp) (2 * (ifp)->if_mtu) #define HN_LRO_ACKCNT_DEF 1 /* * Be aware that this sleepable mutex will exhibit WITNESS errors when * certain TCP and ARP code paths are taken. This appears to be a * well-known condition, as all other drivers checked use a sleeping * mutex to protect their transmit paths. * Also Be aware that mutexes do not play well with semaphores, and there * is a conflicting semaphore in a certain channel code path. */ #define NV_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF) #define NV_LOCK(_sc) mtx_lock(&(_sc)->hn_lock) #define NV_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->hn_lock, MA_OWNED) #define NV_UNLOCK(_sc) mtx_unlock(&(_sc)->hn_lock) #define NV_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->hn_lock) /* * Globals */ int hv_promisc_mode = 0; /* normal mode by default */ SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface"); /* Trust tcp segements verification on host side. */ static int hn_trust_hosttcp = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN, &hn_trust_hosttcp, 0, "Trust tcp segement verification on host side, " "when csum info is missing (global setting)"); /* Trust udp datagrams verification on host side. */ static int hn_trust_hostudp = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN, &hn_trust_hostudp, 0, "Trust udp datagram verification on host side, " "when csum info is missing (global setting)"); /* Trust ip packets verification on host side. 
*/ static int hn_trust_hostip = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN, &hn_trust_hostip, 0, "Trust ip packet verification on host side, " "when csum info is missing (global setting)"); #if __FreeBSD_version >= 1100045 /* Limit TSO burst size */ static int hn_tso_maxlen = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN, &hn_tso_maxlen, 0, "TSO burst limit"); #endif /* Limit chimney send size */ static int hn_tx_chimney_size = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN, &hn_tx_chimney_size, 0, "Chimney send packet size limit"); /* Limit the size of packet for direct transmission */ static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF; SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN, &hn_direct_tx_size, 0, "Size of the packet for direct transmission"); #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 static int hn_lro_entry_count = HN_LROENT_CNT_DEF; SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN, &hn_lro_entry_count, 0, "LRO entry count"); #endif #endif static int hn_share_tx_taskq = 0; SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN, &hn_share_tx_taskq, 0, "Enable shared TX taskqueue"); static struct taskqueue *hn_tx_taskq; #ifndef HN_USE_TXDESC_BUFRING static int hn_use_txdesc_bufring = 0; #else static int hn_use_txdesc_bufring = 1; #endif SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD, &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors"); static int hn_bind_tx_taskq = -1; SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN, &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu"); /* * Forward declarations */ static void hn_stop(hn_softc_t *sc); static void hn_ifinit_locked(hn_softc_t *sc); static void hn_ifinit(void *xsc); static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int hn_start_locked(struct hn_tx_ring *txr, int len); static void hn_start(struct ifnet *ifp); static void hn_start_txeof(struct hn_tx_ring *); static int hn_ifmedia_upd(struct ifnet *ifp); static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS); static int hn_check_iplen(const struct mbuf *, int); static int hn_create_tx_ring(struct hn_softc *, int); static void hn_destroy_tx_ring(struct hn_tx_ring *); static int hn_create_tx_data(struct hn_softc *); static void hn_destroy_tx_data(struct hn_softc *); static void hn_start_taskfunc(void *, int); static void hn_start_txeof_taskfunc(void *, int); static void hn_stop_tx_tasks(struct hn_softc *); static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **); static void hn_create_rx_data(struct hn_softc *sc); static void hn_destroy_rx_data(struct hn_softc *sc); static void hn_set_tx_chimney_size(struct hn_softc *, int); static int hn_ifmedia_upd(struct ifnet *ifp __unused) { return EOPNOTSUPP; } static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct hn_softc *sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!sc->hn_carrier) { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_status |= 
IFM_ACTIVE; ifmr->ifm_active |= IFM_10G_T | IFM_FDX; } /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */ static const hv_guid g_net_vsc_device_type = { .data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46, 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E} }; /* * Standard probe entry point. * */ static int netvsc_probe(device_t dev) { const char *p; p = vmbus_get_type(dev); if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) { device_set_desc(dev, "Synthetic Network Interface"); if (bootverbose) printf("Netvsc probe... DONE \n"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Standard attach entry point. * * Called when the driver is loaded. It allocates needed resources, * and initializes the "hardware" and software. */ static int netvsc_attach(device_t dev) { struct hv_device *device_ctx = vmbus_get_devctx(dev); netvsc_device_info device_info; hn_softc_t *sc; int unit = device_get_unit(dev); struct ifnet *ifp = NULL; int error; #if __FreeBSD_version >= 1100045 int tso_maxlen; #endif sc = device_get_softc(dev); if (sc == NULL) { return (ENOMEM); } bzero(sc, sizeof(hn_softc_t)); sc->hn_unit = unit; sc->hn_dev = dev; if (hn_tx_taskq == NULL) { sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &sc->hn_tx_taskq); if (hn_bind_tx_taskq >= 0) { int cpu = hn_bind_tx_taskq; cpuset_t cpu_set; if (cpu > mp_ncpus - 1) cpu = mp_ncpus - 1; CPU_SETOF(cpu, &cpu_set); taskqueue_start_threads_cpuset(&sc->hn_tx_taskq, 1, PI_NET, &cpu_set, "%s tx", device_get_nameunit(dev)); } else { taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", device_get_nameunit(dev)); } } else { sc->hn_tx_taskq = hn_tx_taskq; } NV_LOCK_INIT(sc, "NetVSCLock"); sc->hn_dev_obj = device_ctx; ifp = sc->hn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = sc; error = hn_create_tx_data(sc); if (error) goto failed; hn_create_rx_data(sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_dunit = unit; ifp->if_dname = NETVSC_DEVNAME; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = hn_ioctl; ifp->if_start = hn_start; ifp->if_init = hn_ifinit; /* needed by hv_rf_on_device_add() code */ ifp->if_mtu = ETHERMTU; IFQ_SET_MAXLEN(&ifp->if_snd, 512); ifp->if_snd.ifq_drv_maxlen = 511; IFQ_SET_READY(&ifp->if_snd); ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts); ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO); /* XXX ifmedia_set really should do this for us */ sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media; /* * Tell upper layers that we support full VLAN capability. 
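 * (if_hdrlen is set to sizeof(struct ether_vlan_header) below so the stack
 * accounts for the four extra VLAN tag bytes in header-length calculations.)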
*/ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO; ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO; error = hv_rf_on_device_add(device_ctx, &device_info); if (error) goto failed; if (device_info.link_state == 0) { sc->hn_carrier = 1; } #if __FreeBSD_version >= 1100045 tso_maxlen = hn_tso_maxlen; if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET) tso_maxlen = IP_MAXPACKET; ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; ifp->if_hw_tsomax = tso_maxlen - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); #endif ether_ifattach(ifp, device_info.mac_addr); #if __FreeBSD_version >= 1100045 if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax, ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize); #endif sc->hn_tx_chimney_max = sc->net_dev->send_section_size; hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max); if (hn_tx_chimney_size > 0 && hn_tx_chimney_size < sc->hn_tx_chimney_max) hn_set_tx_chimney_size(sc, hn_tx_chimney_size); return (0); failed: hn_destroy_tx_data(sc); if (ifp != NULL) if_free(ifp); return (error); } /* * Standard detach entry point */ static int netvsc_detach(device_t dev) { struct hn_softc *sc = device_get_softc(dev); struct hv_device *hv_device = vmbus_get_devctx(dev); if (bootverbose) printf("netvsc_detach\n"); /* * XXXKYS: Need to clean up all our * driver state; this is the driver * unloading. */ /* * XXXKYS: Need to stop outgoing traffic and unregister * the netdevice. */ hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL); hn_stop_tx_tasks(sc); ifmedia_removeall(&sc->hn_media); hn_destroy_rx_data(sc); hn_destroy_tx_data(sc); if (sc->hn_tx_taskq != hn_tx_taskq) taskqueue_free(sc->hn_tx_taskq); return (0); } /* * Standard shutdown entry point */ static int netvsc_shutdown(device_t dev) { return (0); } static __inline int hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs) { struct mbuf *m = *m_head; int error; error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { struct mbuf *m_new; m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX); if (m_new == NULL) return ENOBUFS; else *m_head = m = m_new; txr->hn_tx_collapsed++; error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); } if (!error) { bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_PREWRITE); txd->flags |= HN_TXD_FLAG_DMAMAP; } return error; } static __inline void hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd) { if (txd->flags & HN_TXD_FLAG_DMAMAP) { bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->hn_tx_data_dtag, txd->data_dmap); txd->flags &= ~HN_TXD_FLAG_DMAMAP; } } static __inline int hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) { KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0, ("put an onlist txd %#x", txd->flags)); KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs)); if (atomic_fetchadd_int(&txd->refs, -1) != 1) return 0; hn_txdesc_dmamap_unload(txr, txd); if (txd->m != NULL) { m_freem(txd->m); txd->m = NULL; } txd->flags |= HN_TXD_FLAG_ONLIST; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); KASSERT(txr->hn_txdesc_avail >= 0 && 
txr->hn_txdesc_avail < txr->hn_txdesc_cnt, ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail)); txr->hn_txdesc_avail++; SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); mtx_unlock_spin(&txr->hn_txlist_spin); #else atomic_add_int(&txr->hn_txdesc_avail, 1); buf_ring_enqueue(txr->hn_txdesc_br, txd); #endif return 1; } static __inline struct hn_txdesc * hn_txdesc_get(struct hn_tx_ring *txr) { struct hn_txdesc *txd; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); txd = SLIST_FIRST(&txr->hn_txlist); if (txd != NULL) { KASSERT(txr->hn_txdesc_avail > 0, ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail)); txr->hn_txdesc_avail--; SLIST_REMOVE_HEAD(&txr->hn_txlist, link); } mtx_unlock_spin(&txr->hn_txlist_spin); #else txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); #endif if (txd != NULL) { #ifdef HN_USE_TXDESC_BUFRING atomic_subtract_int(&txr->hn_txdesc_avail, 1); #endif KASSERT(txd->m == NULL && txd->refs == 0 && (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd")); txd->flags &= ~HN_TXD_FLAG_ONLIST; txd->refs = 1; } return txd; } static __inline void hn_txdesc_hold(struct hn_txdesc *txd) { /* 0->1 transition will never work */ KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs)); atomic_add_int(&txd->refs, 1); } static void hn_tx_done(void *xpkt) { netvsc_packet *packet = xpkt; struct hn_txdesc *txd; struct hn_tx_ring *txr; txd = (struct hn_txdesc *)(uintptr_t) packet->compl.send.send_completion_tid; txr = txd->txr; txr->hn_has_txeof = 1; hn_txdesc_put(txr, txd); } void netvsc_channel_rollup(struct hv_device *device_ctx) { struct hn_softc *sc = device_get_softc(device_ctx->device); struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; /* TODO: vRSS */ #if defined(INET) || defined(INET6) struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ struct lro_ctrl *lro = &rxr->hn_lro; struct lro_entry *queued; while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif if (!txr->hn_has_txeof) return; txr->hn_has_txeof = 0; txr->hn_txeof(txr); } /* * NOTE: * If this function fails, then both txd and m_head0 will be freed. */ static int hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0) { bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX]; int error, nsegs, i; struct mbuf *m_head = *m_head0; netvsc_packet *packet; rndis_msg *rndis_mesg; rndis_packet *rndis_pkt; rndis_per_packet_info *rppi; uint32_t rndis_msg_size; packet = &txd->netvsc_pkt; packet->is_data_pkt = TRUE; packet->tot_data_buf_len = m_head->m_pkthdr.len; /* * extension points to the area reserved for the * rndis_filter_packet, which is placed just after * the netvsc_packet (and rppi struct, if present; * length is updated later). 
*/ rndis_mesg = txd->rndis_msg; /* XXX not necessary */ memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN); rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG; rndis_pkt = &rndis_mesg->msg.packet; rndis_pkt->data_offset = sizeof(rndis_packet); rndis_pkt->data_length = packet->tot_data_buf_len; rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet); rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet); if (m_head->m_flags & M_VLANTAG) { ndis_8021q_info *rppi_vlan_info; rndis_msg_size += RNDIS_VLAN_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE, ieee_8021q_info); rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); rppi_vlan_info->u1.s1.vlan_id = m_head->m_pkthdr.ether_vtag & 0xfff; } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { rndis_tcp_tso_info *tso_info; struct ether_vlan_header *eh; int ether_len; /* * XXX need m_pullup and use mtodo */ eh = mtod(m_head, struct ether_vlan_header*); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; else ether_len = ETHER_HDR_LEN; rndis_msg_size += RNDIS_TSO_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE, tcp_large_send_info); tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); tso_info->lso_v2_xmit.type = RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; #ifdef INET if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) { struct ip *ip = (struct ip *)(m_head->m_data + ether_len); unsigned long iph_len = ip->ip_hl << 2; struct tcphdr *th = (struct tcphdr *)((caddr_t)ip + iph_len); tso_info->lso_v2_xmit.ip_version = RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; ip->ip_len = 0; ip->ip_sum = 0; th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } #endif #if defined(INET6) && defined(INET) else #endif #ifdef INET6 { struct ip6_hdr *ip6 = (struct ip6_hdr *) (m_head->m_data + ether_len); struct tcphdr *th = (struct tcphdr *)(ip6 + 1); tso_info->lso_v2_xmit.ip_version = RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; ip6->ip6_plen = 0; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); } #endif tso_info->lso_v2_xmit.tcp_header_offset = 0; tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz; } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) { rndis_tcp_ip_csum_info *csum_info; rndis_msg_size += RNDIS_CSUM_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE, tcpip_chksum_info); csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); csum_info->xmit.is_ipv4 = 1; if (m_head->m_pkthdr.csum_flags & CSUM_IP) csum_info->xmit.ip_header_csum = 1; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) { csum_info->xmit.tcp_csum = 1; csum_info->xmit.tcp_header_offset = 0; } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { csum_info->xmit.udp_csum = 1; } } rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size; packet->tot_data_buf_len = rndis_mesg->msg_len; /* * Chimney send, if the packet could fit into one chimney buffer. 
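 * A packet that fits is copied, RNDIS message first and mbuf data after it,
 * into a free send-buffer section, so it needs no page buffers or DMA
 * mapping; anything larger falls through to the page-buffer path below.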
*/ if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) { netvsc_dev *net_dev = txr->hn_sc->net_dev; uint32_t send_buf_section_idx; send_buf_section_idx = hv_nv_get_next_send_section(net_dev); if (send_buf_section_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) { uint8_t *dest = ((uint8_t *)net_dev->send_buf + (send_buf_section_idx * net_dev->send_section_size)); memcpy(dest, rndis_mesg, rndis_msg_size); dest += rndis_msg_size; m_copydata(m_head, 0, m_head->m_pkthdr.len, dest); packet->send_buf_section_idx = send_buf_section_idx; packet->send_buf_section_size = packet->tot_data_buf_len; packet->page_buf_count = 0; txr->hn_tx_chimney++; goto done; } } error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); if (error) { int freed; /* * This mbuf is not linked w/ the txd yet, so free it now. */ m_freem(m_head); *m_head0 = NULL; freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon txdma error")); txr->hn_txdma_failed++; if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1); return error; } *m_head0 = m_head; packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS; /* send packet with page buffer */ packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr); packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK; packet->page_buffers[0].length = rndis_msg_size; /* * Fill the page buffers with mbuf info starting at index * HV_RF_NUM_TX_RESERVED_PAGE_BUFS. */ for (i = 0; i < nsegs; ++i) { hv_vmbus_page_buffer *pb = &packet->page_buffers[ i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS]; pb->pfn = atop(segs[i].ds_addr); pb->offset = segs[i].ds_addr & PAGE_MASK; pb->length = segs[i].ds_len; } packet->send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX; packet->send_buf_section_size = 0; done: txd->m = m_head; /* Set the completion routine */ packet->compl.send.on_send_completion = hn_tx_done; packet->compl.send.send_completion_context = packet; packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd; return 0; } /* + * NOTE: + * If this function fails, then txd will be freed, but the mbuf + * associated w/ the txd will _not_ be freed. + */ +static int +hn_send_pkt(struct ifnet *ifp, struct hv_device *device_ctx, + struct hn_tx_ring *txr, struct hn_txdesc *txd) +{ + int error, send_failed = 0; + +again: + /* + * Make sure that txd is not freed before ETHER_BPF_MTAP. + */ + hn_txdesc_hold(txd); + error = hv_nv_on_send(device_ctx, &txd->netvsc_pkt); + if (!error) { + ETHER_BPF_MTAP(ifp, txd->m); + if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); + } + hn_txdesc_put(txr, txd); + + if (__predict_false(error)) { + int freed; + + /* + * This should "really rarely" happen. + * + * XXX Too many RX to be acked or too many sideband + * commands to run? Ask netvsc_channel_rollup() + * to kick start later. + */ + txr->hn_has_txeof = 1; + if (!send_failed) { + txr->hn_send_failed++; + send_failed = 1; + /* + * Try sending again after set hn_has_txeof; + * in case that we missed the last + * netvsc_channel_rollup(). + */ + goto again; + } + if_printf(ifp, "send failed\n"); + + /* + * Caller will perform further processing on the + * associated mbuf, so don't free it in hn_txdesc_put(); + * only unload it from the DMA map in hn_txdesc_put(), + * if it was loaded. 
+ */ + txd->m = NULL; + freed = hn_txdesc_put(txr, txd); + KASSERT(freed != 0, + ("fail to free txd upon send error")); + + txr->hn_send_failed++; + } + return error; +} + +/* * Start a transmit of one or more packets */ static int hn_start_locked(struct hn_tx_ring *txr, int len) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev); KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); mtx_assert(&txr->hn_tx_lock, MA_OWNED); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { - int error, send_failed = 0; struct hn_txdesc *txd; struct mbuf *m_head; + int error; IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (len > 0 && m_head->m_pkthdr.len > len) { /* * This sending could be time consuming; let callers * dispatch this packet sending (and sending of any * following up packets) to tx taskqueue. */ - IF_PREPEND(&ifp->if_snd, m_head); + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); return 1; } txd = hn_txdesc_get(txr); if (txd == NULL) { txr->hn_no_txdescs++; - IF_PREPEND(&ifp->if_snd, m_head); + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } error = hn_encap(txr, txd, &m_head); if (error) { /* Both txd and m_head are freed */ continue; } -again: - /* - * Make sure that txd is not freed before ETHER_BPF_MTAP. - */ - hn_txdesc_hold(txd); - error = hv_nv_on_send(device_ctx, &txd->netvsc_pkt); - if (!error) { - ETHER_BPF_MTAP(ifp, m_head); - if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); - } - hn_txdesc_put(txr, txd); + error = hn_send_pkt(ifp, device_ctx, txr, txd); if (__predict_false(error)) { - int freed; - - /* - * This should "really rarely" happen. - * - * XXX Too many RX to be acked or too many sideband - * commands to run? Ask netvsc_channel_rollup() - * to kick start later. - */ - txr->hn_has_txeof = 1; - if (!send_failed) { - txr->hn_send_failed++; - send_failed = 1; - /* - * Try sending again after set hn_has_txeof; - * in case that we missed the last - * netvsc_channel_rollup(). - */ - goto again; - } - if_printf(ifp, "send failed\n"); - - /* - * This mbuf will be prepended, don't free it - * in hn_txdesc_put(); only unload it from the - * DMA map in hn_txdesc_put(), if it was loaded. - */ - txd->m = NULL; - freed = hn_txdesc_put(txr, txd); - KASSERT(freed != 0, - ("fail to free txd upon send error")); - - txr->hn_send_failed++; - IF_PREPEND(&ifp->if_snd, m_head); + /* txd is freed, but m_head is not */ + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } } return 0; } /* * Link up/down notification */ void netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status) { hn_softc_t *sc = device_get_softc(device_obj->device); if (sc == NULL) { return; } if (status == 1) { sc->hn_carrier = 1; } else { sc->hn_carrier = 0; } } /* * Append the specified data to the indicated mbuf chain, * Extend the mbuf chain if the new data does not fit in * existing space. * * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c. * There should be an equivalent in the kernel mbuf code, * but there does not appear to be one yet. * * Differs from m_append() in that additional mbufs are * allocated with cluster size MJUMPAGESIZE, and filled * accordingly. * * Return 1 if able to complete the job; otherwise 0. 
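 * If the chain could not be fully extended, m_pkthdr.len is still advanced
 * only by the number of bytes that were actually copied.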
*/ static int hv_m_append(struct mbuf *m0, int len, c_caddr_t cp) { struct mbuf *m, *n; int remainder, space; for (m = m0; m->m_next != NULL; m = m->m_next) ; remainder = len; space = M_TRAILINGSPACE(m); if (space > 0) { /* * Copy into available space. */ if (space > remainder) space = remainder; bcopy(cp, mtod(m, caddr_t) + m->m_len, space); m->m_len += space; cp += space; remainder -= space; } while (remainder > 0) { /* * Allocate a new mbuf; could check space * and allocate a cluster instead. */ n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE); if (n == NULL) break; n->m_len = min(MJUMPAGESIZE, remainder); bcopy(cp, mtod(n, caddr_t), n->m_len); cp += n->m_len; remainder -= n->m_len; m->m_next = n; m = n; } if (m0->m_flags & M_PKTHDR) m0->m_pkthdr.len += len - remainder; return (remainder == 0); } /* * Called when we receive a data packet from the "wire" on the * specified device * * Note: This is no longer used as a callback */ int netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, rndis_tcp_ip_csum_info *csum_info) { struct hn_softc *sc = device_get_softc(device_ctx->device); struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ struct mbuf *m_new; struct ifnet *ifp; int size, do_lro = 0, do_csum = 1; if (sc == NULL) { return (0); /* TODO: KYS how can this be! */ } ifp = sc->hn_ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { return (0); } /* * Bail out if packet contains more data than configured MTU. */ if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) { return (0); } else if (packet->tot_data_buf_len <= MHLEN) { m_new = m_gethdr(M_NOWAIT, MT_DATA); if (m_new == NULL) return (0); memcpy(mtod(m_new, void *), packet->data, packet->tot_data_buf_len); m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len; rxr->hn_small_pkts++; } else { /* * Get an mbuf with a cluster. For packets 2K or less, * get a standard 2K cluster. For anything larger, get a * 4K cluster. Any buffers larger than 4K can cause problems * if looped around to the Hyper-V TX channel, so avoid them. 
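 * (MCLBYTES is the standard 2K cluster and MJUMPAGESIZE the page-sized jumbo
 * cluster; the size selection below chooses between the two.)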
*/ size = MCLBYTES; if (packet->tot_data_buf_len > MCLBYTES) { /* 4096 */ size = MJUMPAGESIZE; } m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); if (m_new == NULL) { if_printf(ifp, "alloc mbuf failed.\n"); return (0); } hv_m_append(m_new, packet->tot_data_buf_len, packet->data); } m_new->m_pkthdr.rcvif = ifp; if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0)) do_csum = 0; /* receive side checksum offload */ if (csum_info != NULL) { /* IP csum offload */ if (csum_info->receive.ip_csum_succeeded && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); rxr->hn_csum_ip++; } /* TCP/UDP csum offload */ if ((csum_info->receive.tcp_csum_succeeded || csum_info->receive.udp_csum_succeeded) && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; if (csum_info->receive.tcp_csum_succeeded) rxr->hn_csum_tcp++; else rxr->hn_csum_udp++; } if (csum_info->receive.ip_csum_succeeded && csum_info->receive.tcp_csum_succeeded) do_lro = 1; } else { const struct ether_header *eh; uint16_t etype; int hoff; hoff = sizeof(*eh); if (m_new->m_len < hoff) goto skip; eh = mtod(m_new, struct ether_header *); etype = ntohs(eh->ether_type); if (etype == ETHERTYPE_VLAN) { const struct ether_vlan_header *evl; hoff = sizeof(*evl); if (m_new->m_len < hoff) goto skip; evl = mtod(m_new, struct ether_vlan_header *); etype = ntohs(evl->evl_proto); } if (etype == ETHERTYPE_IP) { int pr; pr = hn_check_iplen(m_new, hoff); if (pr == IPPROTO_TCP) { if (do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_TCP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } /* Rely on SW csum verification though... */ do_lro = 1; } else if (pr == IPPROTO_UDP) { if (do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_UDP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } } else if (pr != IPPROTO_DONE && do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); } } } skip: if ((packet->vlan_tci != 0) && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m_new->m_pkthdr.ether_vtag = packet->vlan_tci; m_new->m_flags |= M_VLANTAG; } /* * Note: Moved RX completion back to hv_nv_on_receive() so all * messages (not just data messages) will trigger a response. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((ifp->if_capenable & IFCAP_LRO) && do_lro) { #if defined(INET) || defined(INET6) struct lro_ctrl *lro = &rxr->hn_lro; if (lro->lro_cnt) { rxr->hn_lro_tried++; if (tcp_lro_rx(lro, m_new, 0) == 0) { /* DONE! */ return 0; } } #endif } /* We're not holding the lock here, so don't release it */ (*ifp->if_input)(ifp, m_new); return (0); } void netvsc_recv_rollup(struct hv_device *device_ctx __unused) { } /* * Rules for using sc->temp_unusable: * 1. sc->temp_unusable can only be read or written while holding NV_LOCK() * 2. code reading sc->temp_unusable under NV_LOCK(), and finding * sc->temp_unusable set, must release NV_LOCK() and exit * 3. to retain exclusive control of the interface, * sc->temp_unusable must be set by code before releasing NV_LOCK() * 4. only code setting sc->temp_unusable can clear sc->temp_unusable * 5. code setting sc->temp_unusable must eventually clear sc->temp_unusable */ /* * Standard ioctl entry point. 
Called when the user wants to configure * the interface. */ static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { hn_softc_t *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif netvsc_device_info device_info; struct hv_device *hn_dev; int mask, error = 0; int retry_cnt = 500; switch(cmd) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) hn_ifinit(sc); arp_ifinit(ifp, ifa); } else #endif error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: hn_dev = vmbus_get_devctx(sc->hn_dev); /* Check MTU value change */ if (ifp->if_mtu == ifr->ifr_mtu) break; if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) { error = EINVAL; break; } /* Obtain and record requested MTU */ ifp->if_mtu = ifr->ifr_mtu; /* * Make sure that LRO aggregation length limit is still * valid, after the MTU change. */ NV_LOCK(sc); if (sc->hn_rx_ring[0].hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp)) { int i; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { sc->hn_rx_ring[i].hn_lro.lro_length_lim = HN_LRO_LENLIM_MIN(ifp); } } NV_UNLOCK(sc); do { NV_LOCK(sc); if (!sc->temp_unusable) { sc->temp_unusable = TRUE; retry_cnt = -1; } NV_UNLOCK(sc); if (retry_cnt > 0) { retry_cnt--; DELAY(5 * 1000); } } while (retry_cnt > 0); if (retry_cnt == 0) { error = EINVAL; break; } /* We must remove and add back the device to cause the new * MTU to take effect. This includes tearing down, but not * deleting the channel, then bringing it back up. */ error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL); if (error) { NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); break; } error = hv_rf_on_device_add(hn_dev, &device_info); if (error) { NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); break; } sc->hn_tx_chimney_max = sc->net_dev->send_section_size; if (sc->hn_tx_ring[0].hn_tx_chimney_size > sc->hn_tx_chimney_max) hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max); hn_ifinit_locked(sc); NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); break; case SIOCSIFFLAGS: do { NV_LOCK(sc); if (!sc->temp_unusable) { sc->temp_unusable = TRUE; retry_cnt = -1; } NV_UNLOCK(sc); if (retry_cnt > 0) { retry_cnt--; DELAY(5 * 1000); } } while (retry_cnt > 0); if (retry_cnt == 0) { error = EINVAL; break; } if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. */ #ifdef notyet /* Fixme: Promiscuous mode? 
*/ if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->hn_if_flags & IFF_PROMISC)) { /* do something here for Hyper-V */ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->hn_if_flags & IFF_PROMISC) { /* do something here for Hyper-V */ } else #endif hn_ifinit_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { hn_stop(sc); } } NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); sc->hn_if_flags = ifp->if_flags; error = 0; break; case SIOCSIFCAP: NV_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; if (ifp->if_capenable & IFCAP_TXCSUM) { ifp->if_hwassist |= sc->hn_tx_ring[0].hn_csum_assist; } else { ifp->if_hwassist &= ~sc->hn_tx_ring[0].hn_csum_assist; } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; else ifp->if_hwassist &= ~CSUM_IP_TSO; } if (mask & IFCAP_TSO6) { ifp->if_capenable ^= IFCAP_TSO6; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; else ifp->if_hwassist &= ~CSUM_IP6_TSO; } NV_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: #ifdef notyet /* Fixme: Multicast mode? */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) { NV_LOCK(sc); netvsc_setmulti(sc); NV_UNLOCK(sc); error = 0; } #endif error = EINVAL; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * */ static void hn_stop(hn_softc_t *sc) { struct ifnet *ifp; int ret; struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev); ifp = sc->hn_ifp; if (bootverbose) printf(" Closing Device ...\n"); atomic_clear_int(&ifp->if_drv_flags, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); if_link_state_change(ifp, LINK_STATE_DOWN); sc->hn_initdone = 0; ret = hv_rf_on_close(device_ctx); } /* * FreeBSD transmit entry point */ static void hn_start(struct ifnet *ifp) { struct hn_softc *sc = ifp->if_softc; struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; sched = hn_start_locked(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (!sched) return; } do_sched: taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } static void hn_start_txeof(struct hn_tx_ring *txr) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); sched = hn_start_locked(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (sched) { taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } } else { do_sched: /* * Release the OACTIVE earlier, with the hope, that * others could catch up. The task will clear the * flag again with the hn_tx_lock to avoid possible * races. 
*/ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } /* * */ static void hn_ifinit_locked(hn_softc_t *sc) { struct ifnet *ifp; struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev); int ret; ifp = sc->hn_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return; } hv_promisc_mode = 1; ret = hv_rf_on_open(device_ctx); if (ret != 0) { return; } else { sc->hn_initdone = 1; } atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING); if_link_state_change(ifp, LINK_STATE_UP); } /* * */ static void hn_ifinit(void *xsc) { hn_softc_t *sc = xsc; NV_LOCK(sc); if (sc->temp_unusable) { NV_UNLOCK(sc); return; } sc->temp_unusable = TRUE; NV_UNLOCK(sc); hn_ifinit_locked(sc); NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); } #ifdef LATER /* * */ static void hn_watchdog(struct ifnet *ifp) { hn_softc_t *sc; sc = ifp->if_softc; printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit); hn_ifinit(sc); /*???*/ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } #endif static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; unsigned int lenlim; int error, i; lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim; error = sysctl_handle_int(oidp, &lenlim, 0, req); if (error || req->newptr == NULL) return error; if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) || lenlim > TCP_LRO_LENGTH_MAX) return EINVAL; NV_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim; NV_UNLOCK(sc); return 0; } static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ackcnt, error, i; /* * lro_ackcnt_lim is append count limit, * +1 to turn it into aggregation limit. */ ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1; error = sysctl_handle_int(oidp, &ackcnt, 0, req); if (error || req->newptr == NULL) return error; if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1)) return EINVAL; /* * Convert aggregation limit back to append * count limit. */ --ackcnt; NV_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt; NV_UNLOCK(sc); return 0; } static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int hcsum = arg2; int on, error, i; on = 0; if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum) on = 1; error = sysctl_handle_int(oidp, &on, 0, req); if (error || req->newptr == NULL) return error; NV_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (on) rxr->hn_trust_hcsum |= hcsum; else rxr->hn_trust_hcsum &= ~hcsum; } NV_UNLOCK(sc); return 0; } static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int chimney_size, error; chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size; error = sysctl_handle_int(oidp, &chimney_size, 0, req); if (error || req->newptr == NULL) return error; if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0) return EINVAL; hn_set_tx_chimney_size(sc, chimney_size); return 0; } static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; u_long stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((u_long *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_long(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. 
*/ for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; *((u_long *)((uint8_t *)rxr + ofs)) = 0; } return 0; } static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; uint64_t stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((uint64_t *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_64(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; *((uint64_t *)((uint8_t *)rxr + ofs)) = 0; } return 0; } static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_tx_ring *txr; u_long stat; stat = 0; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { txr = &sc->hn_tx_ring[i]; stat += *((u_long *)((uint8_t *)txr + ofs)); } error = sysctl_handle_long(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { txr = &sc->hn_tx_ring[i]; *((u_long *)((uint8_t *)txr + ofs)) = 0; } return 0; } static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error, conf; struct hn_tx_ring *txr; txr = &sc->hn_tx_ring[0]; conf = *((int *)((uint8_t *)txr + ofs)); error = sysctl_handle_int(oidp, &conf, 0, req); if (error || req->newptr == NULL) return error; NV_LOCK(sc); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { txr = &sc->hn_tx_ring[i]; *((int *)((uint8_t *)txr + ofs)) = conf; } NV_UNLOCK(sc); return 0; } static int hn_check_iplen(const struct mbuf *m, int hoff) { const struct ip *ip; int len, iphlen, iplen; const struct tcphdr *th; int thoff; /* TCP data offset */ len = hoff + sizeof(struct ip); /* The packet must be at least the size of an IP header. */ if (m->m_pkthdr.len < len) return IPPROTO_DONE; /* The fixed IP header must reside completely in the first mbuf. */ if (m->m_len < len) return IPPROTO_DONE; ip = mtodo(m, hoff); /* Bound check the packet's stated IP header length. */ iphlen = ip->ip_hl << 2; if (iphlen < sizeof(struct ip)) /* minimum header length */ return IPPROTO_DONE; /* The full IP header must reside completely in the one mbuf. */ if (m->m_len < hoff + iphlen) return IPPROTO_DONE; iplen = ntohs(ip->ip_len); /* * Check that the amount of data in the buffers is at least * as much as the IP header would have us expect. */ if (m->m_pkthdr.len < hoff + iplen) return IPPROTO_DONE; /* * Ignore IP fragments. */ if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF)) return IPPROTO_DONE; /* * The TCP/IP or UDP/IP header must be entirely contained within * the first fragment of a packet.
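* The IP protocol number is returned only when all of these checks pass; otherwise IPPROTO_DONE is returned and the caller leaves the mbuf's checksum flags alone.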
*/ switch (ip->ip_p) { case IPPROTO_TCP: if (iplen < iphlen + sizeof(struct tcphdr)) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + sizeof(struct tcphdr)) return IPPROTO_DONE; th = (const struct tcphdr *)((const uint8_t *)ip + iphlen); thoff = th->th_off << 2; if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + thoff) return IPPROTO_DONE; break; case IPPROTO_UDP: if (iplen < iphlen + sizeof(struct udphdr)) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + sizeof(struct udphdr)) return IPPROTO_DONE; break; default: if (iplen < iphlen) return IPPROTO_DONE; break; } return ip->ip_p; } static void hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = arg; if (error) return; KASSERT(nseg == 1, ("too many segments %d!", nseg)); *paddr = segs->ds_addr; } static void hn_create_rx_data(struct hn_softc *sc) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; device_t dev = sc->hn_dev; #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 int lroent_cnt; #endif #endif int i; sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */ sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt, M_NETVSC, M_WAITOK | M_ZERO); #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 lroent_cnt = hn_lro_entry_count; if (lroent_cnt < TCP_LRO_ENTRIES) lroent_cnt = TCP_LRO_ENTRIES; device_printf(dev, "LRO: entry count %d\n", lroent_cnt); #endif #endif /* INET || INET6 */ for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (hn_trust_hosttcp) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; if (hn_trust_hostudp) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP; if (hn_trust_hostip) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; /* * Initialize LRO. 
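* Each RX ring gets its own LRO control block; the length and ACK-count limits start at HN_LRO_LENLIM_DEF and HN_LRO_ACKCNT_DEF and can be tuned later through the lro_length_lim and lro_ackcnt_lim sysctls created below.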
*/ #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0); #else tcp_lro_init(&rxr->hn_lro); rxr->hn_lro.ifp = sc->hn_ifp; #endif rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; #endif /* INET || INET6 */ } ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued", CTLTYPE_U64 | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_lro.lro_queued), hn_rx_stat_u64_sysctl, "LU", "LRO queued"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed", CTLTYPE_U64 | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_lro.lro_flushed), hn_rx_stat_u64_sysctl, "LU", "LRO flushed"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_lro_tried), hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU", "Max # of data bytes to be aggregated by LRO"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I", "Max # of ACKs to be aggregated by LRO"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP, hn_trust_hcsum_sysctl, "I", "Trust tcp segment verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP, hn_trust_hcsum_sysctl, "I", "Trust udp datagram verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP, hn_trust_hcsum_sysctl, "I", "Trust ip packet verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_ip), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_tcp), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_udp), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_trusted), hn_rx_stat_ulong_sysctl, "LU", "# of packets that we trust host's csum verification"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_small_pkts), hn_rx_stat_ulong_sysctl, "LU", "# of small packets received"); } static void hn_destroy_rx_data(struct hn_softc *sc) { #if defined(INET) || defined(INET6) int i; #endif if (sc->hn_rx_ring_cnt == 0) return; #if defined(INET) || defined(INET6) for (i = 0; i < sc->hn_rx_ring_cnt; ++i) tcp_lro_free(&sc->hn_rx_ring[i].hn_lro); #endif free(sc->hn_rx_ring, M_NETVSC); sc->hn_rx_ring = NULL; sc->hn_rx_ring_cnt = 0; } static int hn_create_tx_ring(struct hn_softc *sc, int id) { struct hn_tx_ring *txr = &sc->hn_tx_ring[id]; bus_dma_tag_t parent_dtag; int error, i; txr->hn_sc = sc; #ifndef HN_USE_TXDESC_BUFRING mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); #endif mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF); txr->hn_txdesc_cnt = HN_TX_DESC_CNT; txr->hn_txdesc =
malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK | M_ZERO); #ifndef HN_USE_TXDESC_BUFRING SLIST_INIT(&txr->hn_txlist); #else txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK, &txr->hn_tx_lock); #endif txr->hn_tx_taskq = sc->hn_tx_taskq; TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr); TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr); txr->hn_direct_tx_size = hn_direct_tx_size; if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1) txr->hn_csum_assist = HN_CSUM_ASSIST; else txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8; /* * Always schedule transmission instead of trying to do direct * transmission. This one gives the best performance so far. */ txr->hn_sched_tx = 1; txr->hn_txeof = hn_start_txeof; /* TODO: if_transmit */ parent_dtag = bus_get_dma_tag(sc->hn_dev); /* DMA tag for RNDIS messages. */ error = bus_dma_tag_create(parent_dtag, /* parent */ HN_RNDIS_MSG_ALIGN, /* alignment */ HN_RNDIS_MSG_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HN_RNDIS_MSG_LEN, /* maxsize */ 1, /* nsegments */ HN_RNDIS_MSG_LEN, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->hn_tx_rndis_dtag); if (error) { device_printf(sc->hn_dev, "failed to create rndis dmatag\n"); return error; } /* DMA tag for data. */ error = bus_dma_tag_create(parent_dtag, /* parent */ 1, /* alignment */ HN_TX_DATA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HN_TX_DATA_MAXSIZE, /* maxsize */ HN_TX_DATA_SEGCNT_MAX, /* nsegments */ HN_TX_DATA_SEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->hn_tx_data_dtag); if (error) { device_printf(sc->hn_dev, "failed to create data dmatag\n"); return error; } for (i = 0; i < txr->hn_txdesc_cnt; ++i) { struct hn_txdesc *txd = &txr->hn_txdesc[i]; txd->txr = txr; /* * Allocate and load RNDIS messages. */ error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, (void **)&txd->rndis_msg, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &txd->rndis_msg_dmap); if (error) { device_printf(sc->hn_dev, "failed to allocate rndis_msg, %d\n", i); return error; } error = bus_dmamap_load(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap, txd->rndis_msg, HN_RNDIS_MSG_LEN, hn_dma_map_paddr, &txd->rndis_msg_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->hn_dev, "failed to load rndis_msg, %d\n", i); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); return error; } /* DMA map for TX data. 
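* The map is created from hn_tx_data_dtag, which was set up above to allow up to HN_TX_DATA_SEGCNT_MAX segments of at most HN_TX_DATA_SEGSIZE bytes each.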
*/ error = bus_dmamap_create(txr->hn_tx_data_dtag, 0, &txd->data_dmap); if (error) { device_printf(sc->hn_dev, "failed to allocate tx data dmamap\n"); bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); return error; } /* All set, put it to list */ txd->flags |= HN_TXD_FLAG_ONLIST; #ifndef HN_USE_TXDESC_BUFRING SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); #else buf_ring_enqueue(txr->hn_txdesc_br, txd); #endif } txr->hn_txdesc_avail = txr->hn_txdesc_cnt; if (sc->hn_tx_sysctl_tree != NULL) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; char name[16]; /* * Create per TX ring sysctl tree: * dev.hn.UNIT.tx.RINGID */ ctx = device_get_sysctl_ctx(sc->hn_dev); child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree); snprintf(name, sizeof(name), "%d", id); txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, name, CTLFLAG_RD, 0, ""); if (txr->hn_tx_sysctl_tree != NULL) { child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail", CTLFLAG_RD, &txr->hn_txdesc_avail, 0, "# of available TX descs"); } } return 0; } static void hn_txdesc_dmamap_destroy(struct hn_txdesc *txd) { struct hn_tx_ring *txr = txd->txr; KASSERT(txd->m == NULL, ("still has mbuf installed")); KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped")); bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); } static void hn_destroy_tx_ring(struct hn_tx_ring *txr) { struct hn_txdesc *txd; if (txr->hn_txdesc == NULL) return; #ifndef HN_USE_TXDESC_BUFRING while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) { SLIST_REMOVE_HEAD(&txr->hn_txlist, link); hn_txdesc_dmamap_destroy(txd); } #else while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL) hn_txdesc_dmamap_destroy(txd); #endif if (txr->hn_tx_data_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_data_dtag); if (txr->hn_tx_rndis_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_rndis_dtag); #ifdef HN_USE_TXDESC_BUFRING buf_ring_free(txr->hn_txdesc_br, M_NETVSC); #endif free(txr->hn_txdesc, M_NETVSC); txr->hn_txdesc = NULL; #ifndef HN_USE_TXDESC_BUFRING mtx_destroy(&txr->hn_txlist_spin); #endif mtx_destroy(&txr->hn_tx_lock); } static int hn_create_tx_data(struct hn_softc *sc) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; int i; sc->hn_tx_ring_cnt = 1; /* TODO: vRSS */ sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt, M_NETVSC, M_WAITOK | M_ZERO); ctx = device_get_sysctl_ctx(sc->hn_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev)); /* Create dev.hn.UNIT.tx sysctl tree */ sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx", CTLFLAG_RD, 0, ""); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { int error; error = hn_create_tx_ring(sc, i); if (error) return error; } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_no_txdescs), hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_send_failed), hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_txdma_failed), hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failure"); 
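/* * The per-ring counters below are aggregated by hn_tx_stat_ulong_sysctl(): a read sums the field at the given offset across every TX ring, and writing any value clears it on every ring (e.g., assuming unit 0, "sysctl dev.hn.0.send_failed" reads the total and "sysctl dev.hn.0.send_failed=0" resets it). */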
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_tx_collapsed), hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_tx_chimney), hn_tx_stat_ulong_sysctl, "LU", "# of chimney send"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt", CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0, "# of total TX descs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max", CTLFLAG_RD, &sc->hn_tx_chimney_max, 0, "Chimney send packet size upper boundary"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size", CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl, "I", "Chimney send packet size limit"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size", CTLTYPE_INT | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_direct_tx_size), hn_tx_conf_int_sysctl, "I", "Size of the packet for direct transmission"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx", CTLTYPE_INT | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_sched_tx), hn_tx_conf_int_sysctl, "I", "Always schedule transmission " "instead of doing direct transmission"); return 0; } static void hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size) { int i; NV_LOCK(sc); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size; NV_UNLOCK(sc); } static void hn_destroy_tx_data(struct hn_softc *sc) { int i; if (sc->hn_tx_ring_cnt == 0) return; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) hn_destroy_tx_ring(&sc->hn_tx_ring[i]); free(sc->hn_tx_ring, M_NETVSC); sc->hn_tx_ring = NULL; sc->hn_tx_ring_cnt = 0; } static void hn_start_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); hn_start_locked(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_start_txeof_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE); hn_start_locked(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_stop_tx_tasks(struct hn_softc *sc) { int i; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task); taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task); } } static void hn_tx_taskq_create(void *arg __unused) { if (!hn_share_tx_taskq) return; hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &hn_tx_taskq); if (hn_bind_tx_taskq >= 0) { int cpu = hn_bind_tx_taskq; cpuset_t cpu_set; if (cpu > mp_ncpus - 1) cpu = mp_ncpus - 1; CPU_SETOF(cpu, &cpu_set); taskqueue_start_threads_cpuset(&hn_tx_taskq, 1, PI_NET, &cpu_set, "hn tx"); } else { taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx"); } } SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST, hn_tx_taskq_create, NULL); static void hn_tx_taskq_destroy(void *arg __unused) { if (hn_tx_taskq != NULL) taskqueue_free(hn_tx_taskq); } SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST, hn_tx_taskq_destroy, NULL); static device_method_t netvsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netvsc_probe), DEVMETHOD(device_attach, netvsc_attach), DEVMETHOD(device_detach, netvsc_detach), DEVMETHOD(device_shutdown, netvsc_shutdown), { 0, 0 } }; static driver_t netvsc_driver = { NETVSC_DEVNAME, netvsc_methods, sizeof(hn_softc_t) }; static devclass_t netvsc_devclass; DRIVER_MODULE(hn, vmbus, 
netvsc_driver, netvsc_devclass, 0, 0); MODULE_VERSION(hn, 1); MODULE_DEPEND(hn, vmbus, 1, 1, 1); Index: projects/release-pkg/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c =================================================================== --- projects/release-pkg/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (revision 295925) +++ projects/release-pkg/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (revision 295926) @@ -1,2144 +1,2144 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface * to the Common Access Method (CAM) layer. CAM control blocks (CCBs) are * converted into VSCSI protocol messages which are delivered to the parent * partition StorVSP driver over the Hyper-V VMBUS.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_vstorage.h" #define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE) #define STORVSC_MAX_LUNS_PER_TARGET (64) #define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2) #define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1) #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS #define STORVSC_MAX_TARGETS (2) #define STORVSC_WIN7_MAJOR 4 #define STORVSC_WIN7_MINOR 2 #define STORVSC_WIN8_MAJOR 5 #define STORVSC_WIN8_MINOR 1 #define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta) #define HV_ALIGN(x, a) roundup2(x, a) struct storvsc_softc; struct hv_sgl_node { LIST_ENTRY(hv_sgl_node) link; struct sglist *sgl_data; }; struct hv_sgl_page_pool{ LIST_HEAD(, hv_sgl_node) in_use_sgl_list; LIST_HEAD(, hv_sgl_node) free_sgl_list; boolean_t is_init; } g_hv_sgl_page_pool; #define STORVSC_MAX_SG_PAGE_CNT STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT enum storvsc_request_type { WRITE_TYPE, READ_TYPE, UNKNOWN_TYPE }; struct hv_storvsc_request { LIST_ENTRY(hv_storvsc_request) link; struct vstor_packet vstor_packet; hv_vmbus_multipage_buffer data_buf; void *sense_data; uint8_t sense_info_len; uint8_t retries; union ccb *ccb; struct storvsc_softc *softc; struct callout callout; struct sema synch_sema; /*Synchronize the request/response if needed */ struct sglist *bounce_sgl; unsigned int bounce_sgl_count; uint64_t not_aligned_seg_bits; }; struct storvsc_softc { struct hv_device *hs_dev; LIST_HEAD(, hv_storvsc_request) hs_free_list; struct mtx hs_lock; struct storvsc_driver_props *hs_drv_props; int hs_unit; uint32_t hs_frozen; struct cam_sim *hs_sim; struct cam_path *hs_path; uint32_t hs_num_out_reqs; boolean_t hs_destroy; boolean_t hs_drain_notify; boolean_t hs_open_multi_channel; struct sema hs_drain_sema; struct hv_storvsc_request hs_init_req; struct hv_storvsc_request hs_reset_req; }; /** * HyperV storvsc timeout testing cases: * a. IO returned after first timeout; * b. IO returned after second timeout and queue freeze; * c. IO returned while timer handler is running * The first can be tested by "sg_senddiag -vv /dev/daX", * and the second and third can be done by * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX". */ #define HVS_TIMEOUT_TEST 0 /* * Bus/adapter reset functionality on the Hyper-V host is * buggy and it will be disabled until * it can be further tested. 
*/ #define HVS_HOST_RESET 0 struct storvsc_driver_props { char *drv_name; char *drv_desc; uint8_t drv_max_luns_per_target; uint8_t drv_max_ios_per_target; uint32_t drv_ringbuffer_size; }; enum hv_storage_type { DRIVER_BLKVSC, DRIVER_STORVSC, DRIVER_UNKNOWN }; #define HS_MAX_ADAPTERS 10 #define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ static const hv_guid gStorVscDeviceType={ .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f} }; /* {32412632-86cb-44a2-9b5c-50d1417354f5} */ static const hv_guid gBlkVscDeviceType={ .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5} }; static struct storvsc_driver_props g_drv_props_table[] = { {"blkvsc", "Hyper-V IDE Storage Interface", BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS, STORVSC_RINGBUFFER_SIZE}, {"storvsc", "Hyper-V SCSI Storage Interface", STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS, STORVSC_RINGBUFFER_SIZE} }; /* * Sense buffer size changed in win8; have a run-time * variable to track the size we should use. */ static int sense_buffer_size; /* * The size of the vmscsi_request has changed in win8. The * additional size is for the newly added elements in the * structure. These elements are valid only when we are talking * to a win8 host. * Track the correct size we need to apply. */ static int vmscsi_size_delta; static int storvsc_current_major; static int storvsc_current_minor; /* static functions */ static int storvsc_probe(device_t dev); static int storvsc_attach(device_t dev); static int storvsc_detach(device_t dev); static void storvsc_poll(struct cam_sim * sim); static void storvsc_action(struct cam_sim * sim, union ccb * ccb); static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp); static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp); static enum hv_storage_type storvsc_get_storage_type(device_t dev); static void hv_storvsc_rescan_target(struct storvsc_softc *sc); static void hv_storvsc_on_channel_callback(void *context); static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc, struct vstor_packet *vstor_packet, struct hv_storvsc_request *request); static int hv_storvsc_connect_vsp(struct hv_device *device); static void storvsc_io_done(struct hv_storvsc_request *reqp); static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, bus_dma_segment_t *orig_sgl, unsigned int orig_sgl_count, uint64_t seg_bits); void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, unsigned int dest_sgl_count, struct sglist* src_sgl, uint64_t seg_bits); static device_method_t storvsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, storvsc_probe), DEVMETHOD(device_attach, storvsc_attach), DEVMETHOD(device_detach, storvsc_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t storvsc_driver = { "storvsc", storvsc_methods, sizeof(struct storvsc_softc), }; static devclass_t storvsc_devclass; DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0); MODULE_VERSION(storvsc, 1); MODULE_DEPEND(storvsc, vmbus, 1, 1, 1); /** * The host is capable of sending messages to us that are * completely unsolicited. So, we need to address the race * condition where we may be in the process of unloading the * driver when the host may send us an unsolicited message. * We address this issue by implementing a sequentially * consistent protocol: * * 1. 
Channel callback is invoked while holding the channel lock * and an unloading driver will reset the channel callback under * the protection of this channel lock. * * 2. To ensure bounded wait time for unloading a driver, we don't * permit outgoing traffic once the device is marked as being * destroyed. * * 3. Once the device is marked as being destroyed, we only * permit incoming traffic to properly account for * packets already sent out. */ static inline struct storvsc_softc * get_stor_device(struct hv_device *device, boolean_t outbound) { struct storvsc_softc *sc; sc = device_get_softc(device->device); if (sc == NULL) { return NULL; } if (outbound) { /* * Here we permit outgoing I/O only * if the device is not being destroyed. */ if (sc->hs_destroy) { sc = NULL; } } else { /* * inbound case; if being destroyed * only permit to account for * messages already sent out. */ if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) { sc = NULL; } } return sc; } /** * @brief Callback handler, invoked when a multi-channel offer is received * * @param context new multi-channel */ static void storvsc_handle_sc_creation(void *context) { hv_vmbus_channel *new_channel; struct hv_device *device; struct storvsc_softc *sc; struct vmstor_chan_props props; int ret = 0; new_channel = (hv_vmbus_channel *)context; device = new_channel->primary_channel->device; sc = get_stor_device(device, TRUE); if (sc == NULL) return; if (FALSE == sc->hs_open_multi_channel) return; memset(&props, 0, sizeof(props)); ret = hv_vmbus_channel_open(new_channel, sc->hs_drv_props->drv_ringbuffer_size, sc->hs_drv_props->drv_ringbuffer_size, (void *)&props, sizeof(struct vmstor_chan_props), hv_storvsc_on_channel_callback, new_channel); return; } /** * @brief Send multi-channel creation request to host * * @param device a Hyper-V device pointer * @param max_chans the max channels supported by vmbus */ static void storvsc_send_multichannel_request(struct hv_device *dev, int max_chans) { struct storvsc_softc *sc; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; int request_channels_cnt = 0; int ret; /* get the number of sub-channels that need to be created */ request_channels_cnt = MIN(max_chans, mp_ncpus); sc = get_stor_device(dev, TRUE); if (sc == NULL) { printf("Storvsc_error: get sc failed while sending multichannel " "request\n"); return; } request = &sc->hs_init_req; /* Establish a handler for multi-channel */ dev->channel->sc_creation_callback = storvsc_handle_sc_creation; /* request the host to create multi-channel */ memset(request, 0, sizeof(struct hv_storvsc_request)); sema_init(&request->synch_sema, 0, ("stor_synch_sema")); vstor_packet = &request->vstor_packet; vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->u.multi_channels_cnt = request_channels_cnt; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); /* wait for 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) { printf("Storvsc_error: create multi-channel timeout, %d\n", ret); return; } if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { printf("Storvsc_error: create multi-channel invalid operation " "(%d) or status (%u)\n", vstor_packet->operation, vstor_packet->status); return; } sc->hs_open_multi_channel = TRUE; if (bootverbose) printf("Storvsc create multi-channel success!\n");
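/* * From here on the host has acknowledged the request; sub-channel offers are handled by storvsc_handle_sc_creation(), installed as the sc_creation_callback above, which only opens a new channel once hs_open_multi_channel has been set. */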
} /** * @brief initialize channel connection to parent partition * * @param dev a Hyper-V device pointer * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_channel_init(struct hv_device *dev) { int ret = 0; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; struct storvsc_softc *sc; uint16_t max_chans = 0; boolean_t support_multichannel = FALSE; max_chans = 0; support_multichannel = FALSE; sc = get_stor_device(dev, TRUE); if (sc == NULL) return (ENODEV); request = &sc->hs_init_req; memset(request, 0, sizeof(struct hv_storvsc_request)); vstor_packet = &request->vstor_packet; request->softc = sc; /** * Initiate the vsc/vsp initialization protocol on the open channel */ sema_init(&request->synch_sema, 0, ("stor_synch_sema")); vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { goto cleanup; } /* reuse the packet for version range supported */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION(storvsc_current_major, storvsc_current_minor); /* revision is only significant for Windows guests */ vstor_packet->u.version.revision = 0; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret) goto cleanup; /* TODO: Check returned version */ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) goto cleanup; /** * Query channel properties */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if ( ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; /* TODO: Check returned version */ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { goto cleanup; } /* multi-channels feature is supported by WIN8 and above version */ max_chans = vstor_packet->u.chan_props.max_channel_cnt; if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) && (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) && (vstor_packet->u.chan_props.flags & HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) { support_multichannel = TRUE; } memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if 
(ret != 0) { goto cleanup; } /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) goto cleanup; /* * If multi-channel is supported, send multichannel create * request to host. */ if (support_multichannel) storvsc_send_multichannel_request(dev, max_chans); cleanup: sema_destroy(&request->synch_sema); return (ret); } /** * @brief Open channel connection to parent partition StorVSP driver * * Open and initialize channel connection to parent partition StorVSP driver. * * @param pointer to a Hyper-V device * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_connect_vsp(struct hv_device *dev) { int ret = 0; struct vmstor_chan_props props; struct storvsc_softc *sc; sc = device_get_softc(dev->device); memset(&props, 0, sizeof(struct vmstor_chan_props)); /* * Open the channel */ ret = hv_vmbus_channel_open( dev->channel, sc->hs_drv_props->drv_ringbuffer_size, sc->hs_drv_props->drv_ringbuffer_size, (void *)&props, sizeof(struct vmstor_chan_props), hv_storvsc_on_channel_callback, dev->channel); if (ret != 0) { return ret; } ret = hv_storvsc_channel_init(dev); return (ret); } #if HVS_HOST_RESET static int hv_storvsc_host_reset(struct hv_device *dev) { int ret = 0; struct storvsc_softc *sc; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; sc = get_stor_device(dev, TRUE); if (sc == NULL) { return ENODEV; } request = &sc->hs_reset_req; request->softc = sc; vstor_packet = &request->vstor_packet; sema_init(&request->synch_sema, 0, "stor synch sema"); vstor_packet->operation = VSTOR_OPERATION_RESETBUS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet(dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)&sc->hs_reset_req, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { goto cleanup; } ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */ if (ret) { goto cleanup; } /* * At this point, all outstanding requests in the adapter * should have been flushed out and returned to us */ cleanup: sema_destroy(&request->synch_sema); return (ret); } #endif /* HVS_HOST_RESET */ /** * @brief Function to initiate an I/O request * * @param device Hyper-V device pointer * @param request pointer to a request structure * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_io_request(struct hv_device *device, struct hv_storvsc_request *request) { struct storvsc_softc *sc; struct vstor_packet *vstor_packet = &request->vstor_packet; struct hv_vmbus_channel* outgoing_channel = NULL; int ret = 0; sc = get_stor_device(device, TRUE); if (sc == NULL) { return ENODEV; } vstor_packet->flags |= REQUEST_COMPLETION_FLAG; vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE; vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size; vstor_packet->u.vm_srb.transfer_len = request->data_buf.length; vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB; outgoing_channel = vmbus_select_outgoing_channel(device->channel); mtx_unlock(&request->softc->hs_lock); if (request->data_buf.length) { ret = hv_vmbus_channel_send_packet_multipagebuffer( outgoing_channel, &request->data_buf, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request); } else { ret = hv_vmbus_channel_send_packet( outgoing_channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); }
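/* Retake the softc lock that was dropped around the VMBUS send above before inspecting the result and updating the outstanding request count. */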
mtx_lock(&request->softc->hs_lock); if (ret != 0) { printf("Unable to send packet %p ret %d", vstor_packet, ret); } else { atomic_add_int(&sc->hs_num_out_reqs, 1); } return (ret); } /** * Process IO_COMPLETION_OPERATION and ready * the result to be completed for upper layer * processing by the CAM layer. */ static void hv_storvsc_on_iocompletion(struct storvsc_softc *sc, struct vstor_packet *vstor_packet, struct hv_storvsc_request *request) { struct vmscsi_req *vm_srb; vm_srb = &vstor_packet->u.vm_srb; if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) && (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) { /* Autosense data available */ KASSERT(vm_srb->sense_info_len <= request->sense_info_len, ("vm_srb->sense_info_len <= " "request->sense_info_len")); memcpy(request->sense_data, vm_srb->u.sense_data, vm_srb->sense_info_len); request->sense_info_len = vm_srb->sense_info_len; } /* Complete request by passing to the CAM layer */ storvsc_io_done(request); atomic_subtract_int(&sc->hs_num_out_reqs, 1); if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) { sema_post(&sc->hs_drain_sema); } } static void hv_storvsc_rescan_target(struct storvsc_softc *sc) { path_id_t pathid; target_id_t targetid; union ccb *ccb; pathid = cam_sim_path(sc->hs_sim); targetid = CAM_TARGET_WILDCARD; /* * Allocate a CCB and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { - printf("unable to create path for rescan, pathid: %d," - "targetid: %d\n", pathid, targetid); + printf("unable to create path for rescan, pathid: %u," + "targetid: %u\n", pathid, targetid); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; xpt_rescan(ccb); } static void hv_storvsc_on_channel_callback(void *context) { int ret = 0; hv_vmbus_channel *channel = (hv_vmbus_channel *)context; struct hv_device *device = NULL; struct storvsc_softc *sc; uint32_t bytes_recvd; uint64_t request_id; uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)]; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; if (channel->primary_channel != NULL){ device = channel->primary_channel->device; } else { device = channel->device; } KASSERT(device, ("device is NULL")); sc = get_stor_device(device, FALSE); if (sc == NULL) { printf("Storvsc_error: get stor device failed.\n"); return; } ret = hv_vmbus_channel_recv_packet( channel, packet, roundup2(VSTOR_PKT_SIZE, 8), &bytes_recvd, &request_id); while ((ret == 0) && (bytes_recvd > 0)) { request = (struct hv_storvsc_request *)(uintptr_t)request_id; if ((request == &sc->hs_init_req) || (request == &sc->hs_reset_req)) { memcpy(&request->vstor_packet, packet, sizeof(struct vstor_packet)); sema_post(&request->synch_sema); } else { vstor_packet = (struct vstor_packet *)packet; switch(vstor_packet->operation) { case VSTOR_OPERATION_COMPLETEIO: if (request == NULL) panic("VMBUS: storvsc received a " "packet with NULL request id in " "COMPLETEIO operation."); hv_storvsc_on_iocompletion(sc, vstor_packet, request); break; case VSTOR_OPERATION_REMOVEDEVICE: printf("VMBUS: storvsc operation %d not " "implemented.\n", vstor_packet->operation); /* TODO: implement */ break; case VSTOR_OPERATION_ENUMERATE_BUS: hv_storvsc_rescan_target(sc); break; default: break; } } ret = hv_vmbus_channel_recv_packet( channel, packet, roundup2(VSTOR_PKT_SIZE, 8), 
&bytes_recvd, &request_id); } } /** * @brief StorVSC probe function * * Device probe function. Returns 0 if the input device is a StorVSC * device. Otherwise, ENXIO is returned. If the input device is * for a BlkVSC (paravirtual IDE) device and this support is disabled in * favor of the emulated ATA/IDE device, return ENXIO. * * @param a device * @returns 0 on success, ENXIO if not a matching StorVSC device */ static int storvsc_probe(device_t dev) { int ata_disk_enable = 0; int ret = ENXIO; if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 || hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) { sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); storvsc_current_major = STORVSC_WIN7_MAJOR; storvsc_current_minor = STORVSC_WIN7_MINOR; } else { sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; vmscsi_size_delta = 0; storvsc_current_major = STORVSC_WIN8_MAJOR; storvsc_current_minor = STORVSC_WIN8_MINOR; } switch (storvsc_get_storage_type(dev)) { case DRIVER_BLKVSC: if(bootverbose) device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n"); if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) { if(bootverbose) device_printf(dev, "Enlightened ATA/IDE detected\n"); ret = BUS_PROBE_DEFAULT; } else if(bootverbose) device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n"); break; case DRIVER_STORVSC: if(bootverbose) device_printf(dev, "Enlightened SCSI device detected\n"); ret = BUS_PROBE_DEFAULT; break; default: ret = ENXIO; } return (ret); } /** * @brief StorVSC attach function * * Function responsible for allocating per-device structures, * setting up CAM interfaces and scanning for available LUNs to * be used for SCSI device peripherals. * * @param a device * @returns 0 on success or an error on failure */ static int storvsc_attach(device_t dev) { struct hv_device *hv_dev = vmbus_get_devctx(dev); enum hv_storage_type stor_type; struct storvsc_softc *sc; struct cam_devq *devq; int ret, i, j; struct hv_storvsc_request *reqp; struct root_hold_token *root_mount_token = NULL; struct hv_sgl_node *sgl_node = NULL; void *tmp_buff = NULL; /* * We need to serialize storvsc attach calls.
*/ root_mount_token = root_mount_hold("storvsc"); sc = device_get_softc(dev); if (sc == NULL) { ret = ENOMEM; goto cleanup; } stor_type = storvsc_get_storage_type(dev); if (stor_type == DRIVER_UNKNOWN) { ret = ENODEV; goto cleanup; } bzero(sc, sizeof(struct storvsc_softc)); /* fill in driver specific properties */ sc->hs_drv_props = &g_drv_props_table[stor_type]; /* fill in device specific properties */ sc->hs_unit = device_get_unit(dev); sc->hs_dev = hv_dev; device_set_desc(dev, g_drv_props_table[stor_type].drv_desc); LIST_INIT(&sc->hs_free_list); mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF); for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) { reqp = malloc(sizeof(struct hv_storvsc_request), M_DEVBUF, M_WAITOK|M_ZERO); reqp->softc = sc; LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); } /* create sg-list page pool */ if (FALSE == g_hv_sgl_page_pool.is_init) { g_hv_sgl_page_pool.is_init = TRUE; LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list); LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list); /* * Pre-create SG list, each SG list with * HV_MAX_MULTIPAGE_BUFFER_COUNT segments, each * segment has one page buffer */ for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) { sgl_node = malloc(sizeof(struct hv_sgl_node), M_DEVBUF, M_WAITOK|M_ZERO); sgl_node->sgl_data = sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT, M_WAITOK|M_ZERO); for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) { tmp_buff = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); sgl_node->sgl_data->sg_segs[j].ss_paddr = (vm_paddr_t)tmp_buff; } LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); } } sc->hs_destroy = FALSE; sc->hs_drain_notify = FALSE; sc->hs_open_multi_channel = FALSE; sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema"); ret = hv_storvsc_connect_vsp(hv_dev); if (ret != 0) { goto cleanup; } /* * Create the device queue. * Hyper-V maps each target to one SCSI HBA */ devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target); if (devq == NULL) { device_printf(dev, "Failed to alloc device queue\n"); ret = ENOMEM; goto cleanup; } sc->hs_sim = cam_sim_alloc(storvsc_action, storvsc_poll, sc->hs_drv_props->drv_name, sc, sc->hs_unit, &sc->hs_lock, 1, sc->hs_drv_props->drv_max_ios_per_target, devq); if (sc->hs_sim == NULL) { device_printf(dev, "Failed to alloc sim\n"); cam_simq_free(devq); ret = ENOMEM; goto cleanup; } mtx_lock(&sc->hs_lock); /* bus_id is set to 0, need to get it from VMBUS channel query? 
*/ if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(sc->hs_sim, /*free_devq*/TRUE); mtx_unlock(&sc->hs_lock); device_printf(dev, "Unable to register SCSI bus\n"); ret = ENXIO; goto cleanup; } if (xpt_create_path(&sc->hs_path, /*periph*/NULL, cam_sim_path(sc->hs_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->hs_sim)); cam_sim_free(sc->hs_sim, /*free_devq*/TRUE); mtx_unlock(&sc->hs_lock); device_printf(dev, "Unable to create path\n"); ret = ENXIO; goto cleanup; } mtx_unlock(&sc->hs_lock); root_mount_rel(root_mount_token); return (0); cleanup: root_mount_rel(root_mount_token); while (!LIST_EMPTY(&sc->hs_free_list)) { reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); free(reqp, M_DEVBUF); } while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) { if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); } } sglist_free(sgl_node->sgl_data); free(sgl_node, M_DEVBUF); } return (ret); } /** * @brief StorVSC device detach function * * This function is responsible for safely detaching a * StorVSC device. This includes waiting for inbound responses * to complete and freeing associated per-device structures. * * @param dev a device * @returns 0 on success */ static int storvsc_detach(device_t dev) { struct storvsc_softc *sc = device_get_softc(dev); struct hv_storvsc_request *reqp = NULL; struct hv_device *hv_device = vmbus_get_devctx(dev); struct hv_sgl_node *sgl_node = NULL; int j = 0; mtx_lock(&hv_device->channel->inbound_lock); sc->hs_destroy = TRUE; mtx_unlock(&hv_device->channel->inbound_lock); /* * At this point, all outbound traffic should be disabled. We * only allow inbound traffic (responses) to proceed so that * outstanding requests can be completed. */ sc->hs_drain_notify = TRUE; sema_wait(&sc->hs_drain_sema); sc->hs_drain_notify = FALSE; /* * Since we have already drained, we don't need to busy wait. * The call to close the channel will reset the callback * under the protection of the incoming channel lock. */ hv_vmbus_channel_close(hv_device->channel); mtx_lock(&sc->hs_lock); while (!LIST_EMPTY(&sc->hs_free_list)) { reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); free(reqp, M_DEVBUF); } mtx_unlock(&sc->hs_lock); while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){ if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); } } sglist_free(sgl_node->sgl_data); free(sgl_node, M_DEVBUF); } return (0); } #if HVS_TIMEOUT_TEST /** * @brief unit test for timed out operations * * This function provides unit testing capability to simulate * timed out operations. Recompilation with HVS_TIMEOUT_TEST=1 * is required.
* * @param reqp pointer to a request structure * @param opcode SCSI operation being performed * @param wait if 1, wait for I/O to complete */ static void storvsc_timeout_test(struct hv_storvsc_request *reqp, uint8_t opcode, int wait) { int ret; union ccb *ccb = reqp->ccb; struct storvsc_softc *sc = reqp->softc; if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) { return; } if (wait) { mtx_lock(&reqp->event.mtx); } ret = hv_storvsc_io_request(sc->hs_dev, reqp); if (ret != 0) { if (wait) { mtx_unlock(&reqp->event.mtx); } printf("%s: io_request failed with %d.\n", __func__, ret); ccb->ccb_h.status = CAM_PROVIDE_FAIL; mtx_lock(&sc->hs_lock); storvsc_free_request(sc, reqp); xpt_done(ccb); mtx_unlock(&sc->hs_lock); return; } if (wait) { xpt_print(ccb->ccb_h.path, "%u: %s: waiting for IO return.\n", ticks, __func__); ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz); mtx_unlock(&reqp->event.mtx); xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n", ticks, __func__, (ret == 0)? "IO return detected" : "IO return not detected"); /* * Now both the timer handler and io done are running * simultaneously. We want to confirm the io done always * finishes after the timer handler exits. So reqp used by * timer handler is not freed or stale. Do busy loop for * another 1/10 second to make sure io done does * wait for the timer handler to complete. */ DELAY(100*1000); mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: %s: finishing, queue frozen %d, " "ccb status 0x%x scsi_status 0x%x.\n", ticks, __func__, sc->hs_frozen, ccb->ccb_h.status, ccb->csio.scsi_status); mtx_unlock(&sc->hs_lock); } } #endif /* HVS_TIMEOUT_TEST */ /** * @brief timeout handler for requests * * This function is called as a result of a callout expiring. * * @param arg pointer to a request */ static void storvsc_timeout(void *arg) { struct hv_storvsc_request *reqp = arg; struct storvsc_softc *sc = reqp->softc; union ccb *ccb = reqp->ccb; if (reqp->retries == 0) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: IO timed out (req=0x%p), wait for another %u secs.\n", ticks, reqp, ccb->ccb_h.timeout / 1000); cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL); mtx_unlock(&sc->hs_lock); reqp->retries++; callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout, 0, storvsc_timeout, reqp, 0); #if HVS_TIMEOUT_TEST storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0); #endif return; } mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n", ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000, (sc->hs_frozen == 0)? "freezing the queue" : "the queue is already frozen"); if (sc->hs_frozen == 0) { sc->hs_frozen = 1; xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1); } mtx_unlock(&sc->hs_lock); #if HVS_TIMEOUT_TEST storvsc_timeout_test(reqp, MODE_SELECT_10, 1); #endif } /** * @brief StorVSC device poll function * * This function is responsible for servicing requests when * interrupts are disabled (i.e when we are dumping core.) * * @param sim a pointer to a CAM SCSI interface module */ static void storvsc_poll(struct cam_sim *sim) { struct storvsc_softc *sc = cam_sim_softc(sim); mtx_assert(&sc->hs_lock, MA_OWNED); mtx_unlock(&sc->hs_lock); hv_storvsc_on_channel_callback(sc->hs_dev->channel); mtx_lock(&sc->hs_lock); } /** * @brief StorVSC device action function * * This function is responsible for handling SCSI operations which * are passed from the CAM layer. The requests are in the form of * CAM control blocks which indicate the action being performed. 
* Not all actions require converting the request to a VSCSI protocol * message - these actions can be responded to by this driver. * Requests which are destined for a backend storage device are converted * to a VSCSI protocol message and sent on the channel connection associated * with this device. * * @param sim pointer to a CAM SCSI interface module * @param ccb pointer to a CAM control block */ static void storvsc_action(struct cam_sim *sim, union ccb *ccb) { struct storvsc_softc *sc = cam_sim_softc(sim); int res; mtx_assert(&sc->hs_lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = STORVSC_MAX_TARGETS; cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target; cpi->initiator_id = cpi->max_target; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 300000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; cts->transport = XPORT_SAS; cts->transport_version = 0; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; /* enable tag queuing and disconnected mode */ cts->proto_specific.valid = CTS_SCSI_VALID_TQ; cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; cts->xport_specific.valid = CTS_SPI_VALID_DISC; cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_CALC_GEOMETRY:{ cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); return; } case XPT_RESET_BUS: case XPT_RESET_DEV:{ #if HVS_HOST_RESET if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) { xpt_print(ccb->ccb_h.path, "hv_storvsc_host_reset failed with %d\n", res); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; #else xpt_print(ccb->ccb_h.path, "%s reset not supported.\n", (ccb->ccb_h.func_code == XPT_RESET_BUS)? 
"bus" : "dev"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; #endif /* HVS_HOST_RESET */ } case XPT_SCSI_IO: case XPT_IMMED_NOTIFY: { struct hv_storvsc_request *reqp = NULL; if (ccb->csio.cdb_len == 0) { panic("cdl_len is 0\n"); } if (LIST_EMPTY(&sc->hs_free_list)) { ccb->ccb_h.status = CAM_REQUEUE_REQ; if (sc->hs_frozen == 0) { sc->hs_frozen = 1; xpt_freeze_simq(sim, /* count*/1); } xpt_done(ccb); return; } reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); bzero(reqp, sizeof(struct hv_storvsc_request)); reqp->softc = sc; ccb->ccb_h.status |= CAM_SIM_QUEUED; if ((res = create_storvsc_request(ccb, reqp)) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_init(&reqp->callout, 1); callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout, 0, storvsc_timeout, reqp, 0); #if HVS_TIMEOUT_TEST cv_init(&reqp->event.cv, "storvsc timeout cv"); mtx_init(&reqp->event.mtx, "storvsc timeout mutex", NULL, MTX_DEF); switch (reqp->vstor_packet.vm_srb.cdb[0]) { case MODE_SELECT_10: case SEND_DIAGNOSTIC: /* To have timer send the request. */ return; default: break; } #endif /* HVS_TIMEOUT_TEST */ } if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) { xpt_print(ccb->ccb_h.path, "hv_storvsc_io_request failed with %d\n", res); ccb->ccb_h.status = CAM_PROVIDE_FAIL; storvsc_free_request(sc, reqp); xpt_done(ccb); return; } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } } /** * @brief destroy bounce buffer * * This function is responsible for destroy a Scatter/Gather list * that create by storvsc_create_bounce_buffer() * * @param sgl- the Scatter/Gather need be destroy * @param sg_count- page count of the SG list. * */ static void storvsc_destroy_bounce_buffer(struct sglist *sgl) { struct hv_sgl_node *sgl_node = NULL; if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) { printf("storvsc error: not enough in use sgl\n"); return; } sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list); LIST_REMOVE(sgl_node, link); sgl_node->sgl_data = sgl; LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); } /** * @brief create bounce buffer * * This function is responsible for create a Scatter/Gather list, * which hold several pages that can be aligned with page size. * * @param seg_count- SG-list segments count * @param write - if WRITE_TYPE, set SG list page used size to 0, * otherwise set used size to page size. * * return NULL if create failed */ static struct sglist * storvsc_create_bounce_buffer(uint16_t seg_count, int write) { int i = 0; struct sglist *bounce_sgl = NULL; unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE); struct hv_sgl_node *sgl_node = NULL; /* get struct sglist from free_sgl_list */ if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { printf("storvsc error: not enough free sgl\n"); return NULL; } sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); bounce_sgl = sgl_node->sgl_data; LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link); bounce_sgl->sg_maxseg = seg_count; if (write == WRITE_TYPE) bounce_sgl->sg_nseg = 0; else bounce_sgl->sg_nseg = seg_count; for (i = 0; i < seg_count; i++) bounce_sgl->sg_segs[i].ss_len = buf_len; return bounce_sgl; } /** * @brief copy data from SG list to bounce buffer * * This function is responsible for copy data from one SG list's segments * to another SG list which used as bounce buffer. 
 *
 * @param bounce_sgl - the destination SG list
 * @param orig_sgl - the segments of the source SG list
 * @param orig_sgl_count - the number of source segments
 * @param seg_bits - bitmask of the segments that must be copied through the
 *                   bounce buffer; a set bit means the segment is copied
 *
 */
static void
storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
    bus_dma_segment_t *orig_sgl, unsigned int orig_sgl_count,
    uint64_t seg_bits)
{
        int src_sgl_idx = 0;

        for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
                if (seg_bits & (1 << src_sgl_idx)) {
                        memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
                            (void*)orig_sgl[src_sgl_idx].ds_addr,
                            orig_sgl[src_sgl_idx].ds_len);
                        bounce_sgl->sg_segs[src_sgl_idx].ss_len =
                            orig_sgl[src_sgl_idx].ds_len;
                }
        }
}

/**
 * @brief copy data back from a bounce-buffer SG list to another SG list
 *
 * This function copies data from an SG list that was used as a bounce
 * buffer back into another SG list's segments.
 *
 * @param dest_sgl - the destination SG list's segments
 * @param dest_sgl_count - the number of destination segments
 * @param src_sgl - the source SG list
 * @param seg_bits - bitmask of the source segments that used the bounce buffer
 *
 */
void
storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
    unsigned int dest_sgl_count, struct sglist* src_sgl,
    uint64_t seg_bits)
{
        int sgl_idx = 0;

        for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
                if (seg_bits & (1 << sgl_idx)) {
                        memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
                            (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
                            src_sgl->sg_segs[sgl_idx].ss_len);
                }
        }
}

/**
 * @brief check whether an SG list needs a bounce buffer
 *
 * This function checks whether a bounce buffer is needed for the SG list.
 *
 * @param sgl - the SG list's segments
 * @param sg_count - the number of SG list segments
 * @param bits - bitmask of the segments that need the bounce buffer
 *
 * return -1 if the SG list does not need a bounce buffer, 0 otherwise
 */
static int
storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
    unsigned int sg_count, uint64_t *bits)
{
        int i = 0;
        int offset = 0;
        uint64_t phys_addr = 0;
        uint64_t tmp_bits = 0;
        boolean_t found_hole = FALSE;
        boolean_t pre_aligned = TRUE;

        if (sg_count < 2){
                return -1;
        }

        *bits = 0;

        phys_addr = vtophys(sgl[0].ds_addr);
        offset = phys_addr - trunc_page(phys_addr);
        if (offset != 0) {
                pre_aligned = FALSE;
                tmp_bits |= 1;
        }

        for (i = 1; i < sg_count; i++) {
                phys_addr = vtophys(sgl[i].ds_addr);
                offset = phys_addr - trunc_page(phys_addr);

                if (offset == 0) {
                        if (FALSE == pre_aligned){
                                /*
                                 * This segment is aligned; if the previous
                                 * one is not aligned, we have found a hole.
                                 */
                                found_hole = TRUE;
                        }
                        pre_aligned = TRUE;
                } else {
                        tmp_bits |= 1 << i;
                        if (!pre_aligned) {
                                if (phys_addr != vtophys(sgl[i-1].ds_addr +
                                    sgl[i-1].ds_len)) {
                                        /*
                                         * Check whether this segment connects
                                         * to the previous one; if not, we
                                         * have found a hole.
                                         */
                                        found_hole = TRUE;
                                }
                        } else {
                                found_hole = TRUE;
                        }
                        pre_aligned = FALSE;
                }
        }

        if (!found_hole) {
                return (-1);
        } else {
                *bits = tmp_bits;
                return 0;
        }
}

/**
 * @brief Fill in a request structure based on a CAM control block
 *
 * Fills in a request structure based on the contents of a CAM control
 * block.  The request structure holds the payload information for the
 * VSCSI protocol request.
* * @param ccb pointer to a CAM contorl block * @param reqp pointer to a request structure */ static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp) { struct ccb_scsiio *csio = &ccb->csio; uint64_t phys_addr; uint32_t bytes_to_copy = 0; uint32_t pfn_num = 0; uint32_t pfn; uint64_t not_aligned_seg_bits = 0; /* refer to struct vmscsi_req for meanings of these two fields */ reqp->vstor_packet.u.vm_srb.port = cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)); reqp->vstor_packet.u.vm_srb.path_id = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id; reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun; reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len; if(ccb->ccb_h.flags & CAM_CDB_POINTER) { memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr, csio->cdb_len); } else { memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes, csio->cdb_len); } switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_OUT: reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; break; case CAM_DIR_IN: reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; break; case CAM_DIR_NONE: reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; break; default: reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; break; } reqp->sense_data = &csio->sense_data; reqp->sense_info_len = csio->sense_len; reqp->ccb = ccb; if (0 == csio->dxfer_len) { return (0); } reqp->data_buf.length = csio->dxfer_len; switch (ccb->ccb_h.flags & CAM_DATA_MASK) { case CAM_DATA_VADDR: { bytes_to_copy = csio->dxfer_len; phys_addr = vtophys(csio->data_ptr); reqp->data_buf.offset = phys_addr & PAGE_MASK; while (bytes_to_copy != 0) { int bytes, page_offset; phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length - bytes_to_copy]); pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[pfn_num] = pfn; page_offset = phys_addr & PAGE_MASK; bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); bytes_to_copy -= bytes; pfn_num++; } break; } case CAM_DATA_SG: { int i = 0; int offset = 0; int ret; bus_dma_segment_t *storvsc_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt; printf("Storvsc: get SG I/O operation, %d\n", reqp->vstor_packet.u.vm_srb.data_in); if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){ printf("Storvsc: %d segments is too much, " "only support %d segments\n", storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT); return (EINVAL); } /* * We create our own bounce buffer function currently. Idealy * we should use BUS_DMA(9) framework. But with current BUS_DMA * code there is no callback API to check the page alignment of * middle segments before busdma can decide if a bounce buffer * is needed for particular segment. There is callback, * "bus_dma_filter_t *filter", but the parrameters are not * sufficient for storvsc driver. * TODO: * Add page alignment check in BUS_DMA(9) callback. Once * this is complete, switch the following code to use * BUS_DMA(9) for storvsc bounce buffer support. 
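 * Note that this path does not allocate memory per I/O: the bounce SG lists
 * and their page buffers are pre-created into g_hv_sgl_page_pool at attach
 * time, and storvsc_create_bounce_buffer() only borrows a free entry from
 * that pool.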
*/ /* check if we need to create bounce buffer */ ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist, storvsc_sg_count, ¬_aligned_seg_bits); if (ret != -1) { reqp->bounce_sgl = storvsc_create_bounce_buffer(storvsc_sg_count, reqp->vstor_packet.u.vm_srb.data_in); if (NULL == reqp->bounce_sgl) { printf("Storvsc_error: " "create bounce buffer failed.\n"); return (ENOMEM); } reqp->bounce_sgl_count = storvsc_sg_count; reqp->not_aligned_seg_bits = not_aligned_seg_bits; /* * if it is write, we need copy the original data *to bounce buffer */ if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { storvsc_copy_sgl_to_bounce_buf( reqp->bounce_sgl, storvsc_sglist, storvsc_sg_count, reqp->not_aligned_seg_bits); } /* transfer virtual address to physical frame number */ if (reqp->not_aligned_seg_bits & 0x1){ phys_addr = vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr); }else{ phys_addr = vtophys(storvsc_sglist[0].ds_addr); } reqp->data_buf.offset = phys_addr & PAGE_MASK; pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[0] = pfn; for (i = 1; i < storvsc_sg_count; i++) { if (reqp->not_aligned_seg_bits & (1 << i)) { phys_addr = vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr); } else { phys_addr = vtophys(storvsc_sglist[i].ds_addr); } pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[i] = pfn; } } else { phys_addr = vtophys(storvsc_sglist[0].ds_addr); reqp->data_buf.offset = phys_addr & PAGE_MASK; for (i = 0; i < storvsc_sg_count; i++) { phys_addr = vtophys(storvsc_sglist[i].ds_addr); pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[i] = pfn; } /* check the last segment cross boundary or not */ offset = phys_addr & PAGE_MASK; if (offset) { phys_addr = vtophys(storvsc_sglist[i-1].ds_addr + PAGE_SIZE - offset); pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[i] = pfn; } reqp->bounce_sgl_count = 0; } break; } default: printf("Unknow flags: %d\n", ccb->ccb_h.flags); return(EINVAL); } return(0); } /* * Modified based on scsi_print_inquiry which is responsible to * print the detail information for scsi_inquiry_data. * * Return 1 if it is valid, 0 otherwise. */ static inline int is_inquiry_valid(const struct scsi_inquiry_data *inq_data) { uint8_t type; char vendor[16], product[48], revision[16]; /* * Check device type and qualifier */ if (!(SID_QUAL_IS_VENDOR_UNIQUE(inq_data) || SID_QUAL(inq_data) == SID_QUAL_LU_CONNECTED)) return (0); type = SID_TYPE(inq_data); switch (type) { case T_DIRECT: case T_SEQUENTIAL: case T_PRINTER: case T_PROCESSOR: case T_WORM: case T_CDROM: case T_SCANNER: case T_OPTICAL: case T_CHANGER: case T_COMM: case T_STORARRAY: case T_ENCLOSURE: case T_RBC: case T_OCRW: case T_OSD: case T_ADC: break; case T_NODEVICE: default: return (0); } /* * Check vendor, product, and revision */ cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor), sizeof(vendor)); cam_strvis(product, inq_data->product, sizeof(inq_data->product), sizeof(product)); cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision), sizeof(revision)); if (strlen(vendor) == 0 || strlen(product) == 0 || strlen(revision) == 0) return (0); return (1); } /** * @brief completion function before returning to CAM * * I/O process has been completed and the result needs * to be passed to the CAM layer. * Free resources related to this request. 
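 * On a successful INQUIRY the returned data is additionally checked with
 * is_inquiry_valid(); all-zero INQUIRY responses (sent by some Windows hosts
 * for unpopulated slots) are completed as CAM_DEV_NOT_THERE.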
* * @param reqp pointer to a request structure */ static void storvsc_io_done(struct hv_storvsc_request *reqp) { union ccb *ccb = reqp->ccb; struct ccb_scsiio *csio = &ccb->csio; struct storvsc_softc *sc = reqp->softc; struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; bus_dma_segment_t *ori_sglist = NULL; int ori_sg_count = 0; /* destroy bounce buffer if it is used */ if (reqp->bounce_sgl_count) { ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; ori_sg_count = ccb->csio.sglist_cnt; /* * If it is READ operation, we should copy back the data * to original SG list. */ if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { storvsc_copy_from_bounce_buf_to_sgl(ori_sglist, ori_sg_count, reqp->bounce_sgl, reqp->not_aligned_seg_bits); } storvsc_destroy_bounce_buffer(reqp->bounce_sgl); reqp->bounce_sgl_count = 0; } if (reqp->retries > 0) { mtx_lock(&sc->hs_lock); #if HVS_TIMEOUT_TEST xpt_print(ccb->ccb_h.path, "%u: IO returned after timeout, " "waking up timer handler if any.\n", ticks); mtx_lock(&reqp->event.mtx); cv_signal(&reqp->event.cv); mtx_unlock(&reqp->event.mtx); #endif reqp->retries = 0; xpt_print(ccb->ccb_h.path, "%u: IO returned after timeout, " "stopping timer if any.\n", ticks); mtx_unlock(&sc->hs_lock); } /* * callout_drain() will wait for the timer handler to finish * if it is running. So we don't need any lock to synchronize * between this routine and the timer handler. * Note that we need to make sure reqp is not freed when timer * handler is using or will use it. */ if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_drain(&reqp->callout); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (vm_srb->scsi_status == SCSI_STATUS_OK) { const struct scsi_generic *cmd; /* * Check whether the data for INQUIRY cmd is valid or * not. Windows 10 and Windows 2016 send all zero * inquiry data to VM even for unpopulated slots. */ cmd = (const struct scsi_generic *) ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes); if (cmd->opcode == INQUIRY && is_inquiry_valid( (const struct scsi_inquiry_data *)csio->data_ptr) == 0) { ccb->ccb_h.status |= CAM_DEV_NOT_THERE; if (bootverbose) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc uninstalled device\n"); mtx_unlock(&sc->hs_lock); } } else { ccb->ccb_h.status |= CAM_REQ_CMP; } } else { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc scsi_status = %d\n", vm_srb->scsi_status); mtx_unlock(&sc->hs_lock); ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF); ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len; if (reqp->sense_info_len != 0) { csio->sense_resid = csio->sense_len - reqp->sense_info_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } mtx_lock(&sc->hs_lock); if (reqp->softc->hs_frozen == 1) { xpt_print(ccb->ccb_h.path, "%u: storvsc unfreezing softc 0x%p.\n", ticks, reqp->softc); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; reqp->softc->hs_frozen = 0; } storvsc_free_request(sc, reqp); xpt_done(ccb); mtx_unlock(&sc->hs_lock); } /** * @brief Free a request structure * * Free a request structure by returning it to the free list * * @param sc pointer to a softc * @param reqp pointer to a request structure */ static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp) { LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); } /** * @brief Determine type of storage device from GUID * * Using the type GUID, determine if this is a StorVSC (paravirtual * SCSI or BlkVSC (paravirtual IDE) device. * * @param dev a device * returns an enum */ static enum hv_storage_type storvsc_get_storage_type(device_t dev) { const char *p = vmbus_get_type(dev); if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) { return DRIVER_BLKVSC; } else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) { return DRIVER_STORVSC; } return (DRIVER_UNKNOWN); } Index: projects/release-pkg/sys/dev/hyperv =================================================================== --- projects/release-pkg/sys/dev/hyperv (revision 295925) +++ projects/release-pkg/sys/dev/hyperv (revision 295926) Property changes on: projects/release-pkg/sys/dev/hyperv ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/dev/hyperv:r295886-295925 Index: projects/release-pkg/sys/dev/uart/uart_cpu_fdt.c =================================================================== --- projects/release-pkg/sys/dev/uart/uart_cpu_fdt.c (revision 295925) +++ projects/release-pkg/sys/dev/uart/uart_cpu_fdt.c (revision 295926) @@ -1,216 +1,211 @@ /*- * Copyright (c) 2009-2010 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #ifndef __aarch64__ #include #endif #include #include #include #include #include #include #include -#ifdef __aarch64__ -extern bus_space_tag_t fdtbus_bs_tag; -#endif - /* * UART console routines. */ bus_space_tag_t uart_bus_space_io; bus_space_tag_t uart_bus_space_mem; int uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2) { if (b1->bst != b2->bst) return (0); if (pmap_kextract(b1->bsh) == 0) return (0); if (pmap_kextract(b2->bsh) == 0) return (0); return ((pmap_kextract(b1->bsh) == pmap_kextract(b2->bsh)) ? 1 : 0); } static int phandle_chosen_propdev(phandle_t chosen, const char *name, phandle_t *node) { char buf[64]; if (OF_getprop(chosen, name, buf, sizeof(buf)) <= 0) return (ENXIO); if ((*node = OF_finddevice(buf)) == -1) return (ENXIO); return (0); } static const struct ofw_compat_data * uart_fdt_find_compatible(phandle_t node, const struct ofw_compat_data *cd) { const struct ofw_compat_data *ocd; for (ocd = cd; ocd->ocd_str != NULL; ocd++) { if (fdt_is_compatible(node, ocd->ocd_str)) return (ocd); } return (NULL); } static uintptr_t uart_fdt_find_by_node(phandle_t node, int class_list) { struct ofw_compat_data **cd; const struct ofw_compat_data *ocd; if (class_list) { SET_FOREACH(cd, uart_fdt_class_set) { ocd = uart_fdt_find_compatible(node, *cd); if ((ocd != NULL) && (ocd->ocd_data != 0)) return (ocd->ocd_data); } } else { SET_FOREACH(cd, uart_fdt_class_and_device_set) { ocd = uart_fdt_find_compatible(node, *cd); if ((ocd != NULL) && (ocd->ocd_data != 0)) return (ocd->ocd_data); } } return (0); } int uart_cpu_getdev(int devtype, struct uart_devinfo *di) { const char *propnames[] = {"stdout-path", "linux,stdout-path", "stdout", "stdin-path", "stdin", NULL}; const char **name; struct uart_class *class; phandle_t node, chosen; pcell_t shift, br, rclk; char *cp; int err; - uart_bus_space_mem = fdtbus_bs_tag; - uart_bus_space_io = NULL; - /* Allow overriding the FDT using the environment. */ class = &uart_ns8250_class; err = uart_getenv(devtype, di, class); if (!err) return (0); if (devtype != UART_DEV_CONSOLE) return (ENXIO); /* Has the user forced a specific device node? */ cp = kern_getenv("hw.fdt.console"); if (cp == NULL) { /* * Retrieve /chosen/std{in,out}. */ node = -1; if ((chosen = OF_finddevice("/chosen")) != -1) { for (name = propnames; *name != NULL; name++) { if (phandle_chosen_propdev(chosen, *name, &node) == 0) break; } } if (chosen == -1 || *name == NULL) node = OF_finddevice("serial0"); /* Last ditch */ } else { node = OF_finddevice(cp); } if (node == -1) /* Can't find anything */ return (ENXIO); /* * Check old style of UART definition first. 
Unfortunately, the common * FDT processing is not possible if we have clock, power domains and * pinmux stuff. */ class = (struct uart_class *)uart_fdt_find_by_node(node, 0); if (class != NULL) { if ((err = uart_fdt_get_clock(node, &rclk)) != 0) return (err); } else { /* Check class only linker set */ class = (struct uart_class *)uart_fdt_find_by_node(node, 1); if (class == NULL) return (ENXIO); rclk = 0; } /* * Retrieve serial attributes. */ if (uart_fdt_get_shift(node, &shift) != 0) shift = uart_getregshift(class); - if (OF_getprop(node, "current-speed", &br, sizeof(br)) <= 0) + if (OF_getencprop(node, "current-speed", &br, sizeof(br)) <= 0) br = 0; - else - br = fdt32_to_cpu(br); /* * Finalize configuration. */ di->bas.chan = 0; di->bas.regshft = (u_int)shift; di->baudrate = br; di->bas.rclk = (u_int)rclk; di->ops = uart_getops(class); di->databits = 8; di->stopbits = 1; di->parity = UART_PARITY_NONE; - return (OF_decode_addr(node, 0, &di->bas.bst, &di->bas.bsh, NULL)); + err = OF_decode_addr(node, 0, &di->bas.bst, &di->bas.bsh, NULL); + uart_bus_space_mem = di->bas.bst; + uart_bus_space_io = NULL; + + return (err); } Index: projects/release-pkg/sys/dev/uart/uart_dev_lpc.c =================================================================== --- projects/release-pkg/sys/dev/uart/uart_dev_lpc.c (revision 295925) +++ projects/release-pkg/sys/dev/uart/uart_dev_lpc.c (revision 295926) @@ -1,936 +1,935 @@ /*- * Copyright (c) 2003 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include -#include #include #include #include #include #include #include #include "uart_if.h" #define DEFAULT_RCLK (13 * 1000 * 1000) static bus_space_handle_t bsh_clkpwr; #define lpc_ns8250_get_clkreg(_bas, _reg) \ - bus_space_read_4(fdtbus_bs_tag, bsh_clkpwr, (_reg)) + bus_space_read_4((_bas)->bst, bsh_clkpwr, (_reg)) #define lpc_ns8250_set_clkreg(_bas, _reg, _val) \ - bus_space_write_4(fdtbus_bs_tag, bsh_clkpwr, (_reg), (_val)) + bus_space_write_4((_bas)->bst, bsh_clkpwr, (_reg), (_val)) /* * Clear pending interrupts. THRE is cleared by reading IIR. Data * that may have been received gets lost here. 
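 * The loop keeps reading IIR until IIR_NOPEND is set, consuming the RLS,
 * RXRDY/RXTOUT and MLSC sources along the way.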
*/ static void lpc_ns8250_clrint(struct uart_bas *bas) { uint8_t iir, lsr; iir = uart_getreg(bas, REG_IIR); while ((iir & IIR_NOPEND) == 0) { iir &= IIR_IMASK; if (iir == IIR_RLS) { lsr = uart_getreg(bas, REG_LSR); if (lsr & (LSR_BI|LSR_FE|LSR_PE)) (void)uart_getreg(bas, REG_DATA); } else if (iir == IIR_RXRDY || iir == IIR_RXTOUT) (void)uart_getreg(bas, REG_DATA); else if (iir == IIR_MLSC) (void)uart_getreg(bas, REG_MSR); uart_barrier(bas); iir = uart_getreg(bas, REG_IIR); } } static int lpc_ns8250_delay(struct uart_bas *bas) { uint32_t uclk; int x, y; uclk = lpc_ns8250_get_clkreg(bas, LPC_CLKPWR_UART_U5CLK); x = (uclk >> 8) & 0xff; y = uclk & 0xff; return (16000000 / (bas->rclk * x / y)); } static void lpc_ns8250_divisor(int rclk, int baudrate, int *x, int *y) { switch (baudrate) { case 2400: *x = 1; *y = 255; return; case 4800: *x = 1; *y = 169; return; case 9600: *x = 3; *y = 254; return; case 19200: *x = 3; *y = 127; return; case 38400: *x = 6; *y = 127; return; case 57600: *x = 9; *y = 127; return; default: case 115200: *x = 19; *y = 134; return; case 230400: *x = 19; *y = 67; return; case 460800: *x = 38; *y = 67; return; } } static int lpc_ns8250_drain(struct uart_bas *bas, int what) { int delay, limit; delay = lpc_ns8250_delay(bas); if (what & UART_DRAIN_TRANSMITTER) { /* * Pick an arbitrary high limit to avoid getting stuck in * an infinite loop when the hardware is broken. Make the * limit high enough to handle large FIFOs. */ limit = 10*1024; while ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0 && --limit) DELAY(delay); if (limit == 0) { /* printf("lpc_ns8250: transmitter appears stuck... "); */ return (EIO); } } if (what & UART_DRAIN_RECEIVER) { /* * Pick an arbitrary high limit to avoid getting stuck in * an infinite loop when the hardware is broken. Make the * limit high enough to handle large FIFOs and integrated * UARTs. The HP rx2600 for example has 3 UARTs on the * management board that tend to get a lot of data send * to it when the UART is first activated. */ limit=10*4096; while ((uart_getreg(bas, REG_LSR) & LSR_RXRDY) && --limit) { (void)uart_getreg(bas, REG_DATA); uart_barrier(bas); DELAY(delay << 2); } if (limit == 0) { /* printf("lpc_ns8250: receiver appears broken... "); */ return (EIO); } } return (0); } /* * We can only flush UARTs with FIFOs. UARTs without FIFOs should be * drained. WARNING: this function clobbers the FIFO setting! */ static void lpc_ns8250_flush(struct uart_bas *bas, int what) { uint8_t fcr; fcr = FCR_ENABLE; if (what & UART_FLUSH_TRANSMITTER) fcr |= FCR_XMT_RST; if (what & UART_FLUSH_RECEIVER) fcr |= FCR_RCV_RST; uart_setreg(bas, REG_FCR, fcr); uart_barrier(bas); } static int lpc_ns8250_param(struct uart_bas *bas, int baudrate, int databits, int stopbits, int parity) { int xdiv, ydiv; uint8_t lcr; lcr = 0; if (databits >= 8) lcr |= LCR_8BITS; else if (databits == 7) lcr |= LCR_7BITS; else if (databits == 6) lcr |= LCR_6BITS; else lcr |= LCR_5BITS; if (stopbits > 1) lcr |= LCR_STOPB; lcr |= parity << 3; /* Set baudrate. */ if (baudrate > 0) { uart_setreg(bas, REG_LCR, lcr | LCR_DLAB); uart_barrier(bas); uart_setreg(bas, REG_DLL, 0x00); uart_setreg(bas, REG_DLH, 0x00); uart_barrier(bas); lpc_ns8250_divisor(bas->rclk, baudrate, &xdiv, &ydiv); lpc_ns8250_set_clkreg(bas, LPC_CLKPWR_UART_U5CLK, LPC_CLKPWR_UART_UCLK_X(xdiv) | LPC_CLKPWR_UART_UCLK_Y(ydiv)); } /* Set LCR and clear DLAB. */ uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); return (0); } /* * Low-level UART interface. 
*/ static int lpc_ns8250_probe(struct uart_bas *bas); static void lpc_ns8250_init(struct uart_bas *bas, int, int, int, int); static void lpc_ns8250_term(struct uart_bas *bas); static void lpc_ns8250_putc(struct uart_bas *bas, int); static int lpc_ns8250_rxready(struct uart_bas *bas); static int lpc_ns8250_getc(struct uart_bas *bas, struct mtx *); static struct uart_ops uart_lpc_ns8250_ops = { .probe = lpc_ns8250_probe, .init = lpc_ns8250_init, .term = lpc_ns8250_term, .putc = lpc_ns8250_putc, .rxready = lpc_ns8250_rxready, .getc = lpc_ns8250_getc, }; static int lpc_ns8250_probe(struct uart_bas *bas) { #if 0 u_char val; /* Check known 0 bits that don't depend on DLAB. */ val = uart_getreg(bas, REG_IIR); if (val & 0x30) return (ENXIO); /* * Bit 6 of the MCR (= 0x40) appears to be 1 for the Sun1699 * chip, but otherwise doesn't seem to have a function. In * other words, uart(4) works regardless. Ignore that bit so * the probe succeeds. */ val = uart_getreg(bas, REG_MCR); if (val & 0xa0) return (ENXIO); #endif return (0); } static void lpc_ns8250_init(struct uart_bas *bas, int baudrate, int databits, int stopbits, int parity) { u_char ier; u_long clkmode; /* Enable UART clock */ - bus_space_map(fdtbus_bs_tag, LPC_CLKPWR_PHYS_BASE, LPC_CLKPWR_SIZE, 0, + bus_space_map(bas->bst, LPC_CLKPWR_PHYS_BASE, LPC_CLKPWR_SIZE, 0, &bsh_clkpwr); clkmode = lpc_ns8250_get_clkreg(bas, LPC_UART_CLKMODE); lpc_ns8250_set_clkreg(bas, LPC_UART_CLKMODE, clkmode | LPC_UART_CLKMODE_UART5(1)); #if 0 /* Work around H/W bug */ uart_setreg(bas, REG_DATA, 0x00); #endif if (bas->rclk == 0) bas->rclk = DEFAULT_RCLK; lpc_ns8250_param(bas, baudrate, databits, stopbits, parity); /* Disable all interrupt sources. */ /* * We use 0xe0 instead of 0xf0 as the mask because the XScale PXA * UARTs split the receive time-out interrupt bit out separately as * 0x10. This gets handled by ier_mask and ier_rxbits below. */ ier = uart_getreg(bas, REG_IER) & 0xe0; uart_setreg(bas, REG_IER, ier); uart_barrier(bas); /* Disable the FIFO (if present). */ uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); /* Set RTS & DTR. */ uart_setreg(bas, REG_MCR, MCR_IE | MCR_RTS | MCR_DTR); uart_barrier(bas); lpc_ns8250_clrint(bas); } static void lpc_ns8250_term(struct uart_bas *bas) { /* Clear RTS & DTR. */ uart_setreg(bas, REG_MCR, MCR_IE); uart_barrier(bas); } static void lpc_ns8250_putc(struct uart_bas *bas, int c) { int limit; limit = 250000; while ((uart_getreg(bas, REG_LSR) & LSR_THRE) == 0 && --limit) DELAY(4); uart_setreg(bas, REG_DATA, c); uart_barrier(bas); limit = 250000; while ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0 && --limit) DELAY(4); } static int lpc_ns8250_rxready(struct uart_bas *bas) { return ((uart_getreg(bas, REG_LSR) & LSR_RXRDY) != 0 ? 1 : 0); } static int lpc_ns8250_getc(struct uart_bas *bas, struct mtx *hwmtx) { int c; uart_lock(hwmtx); while ((uart_getreg(bas, REG_LSR) & LSR_RXRDY) == 0) { uart_unlock(hwmtx); DELAY(4); uart_lock(hwmtx); } c = uart_getreg(bas, REG_DATA); uart_unlock(hwmtx); return (c); } /* * High-level UART interface. 
*/ struct lpc_ns8250_softc { struct uart_softc base; uint8_t fcr; uint8_t ier; uint8_t mcr; uint8_t ier_mask; uint8_t ier_rxbits; }; static int lpc_ns8250_bus_attach(struct uart_softc *); static int lpc_ns8250_bus_detach(struct uart_softc *); static int lpc_ns8250_bus_flush(struct uart_softc *, int); static int lpc_ns8250_bus_getsig(struct uart_softc *); static int lpc_ns8250_bus_ioctl(struct uart_softc *, int, intptr_t); static int lpc_ns8250_bus_ipend(struct uart_softc *); static int lpc_ns8250_bus_param(struct uart_softc *, int, int, int, int); static int lpc_ns8250_bus_probe(struct uart_softc *); static int lpc_ns8250_bus_receive(struct uart_softc *); static int lpc_ns8250_bus_setsig(struct uart_softc *, int); static int lpc_ns8250_bus_transmit(struct uart_softc *); static void lpc_ns8250_bus_grab(struct uart_softc *); static void lpc_ns8250_bus_ungrab(struct uart_softc *); static kobj_method_t lpc_ns8250_methods[] = { KOBJMETHOD(uart_attach, lpc_ns8250_bus_attach), KOBJMETHOD(uart_detach, lpc_ns8250_bus_detach), KOBJMETHOD(uart_flush, lpc_ns8250_bus_flush), KOBJMETHOD(uart_getsig, lpc_ns8250_bus_getsig), KOBJMETHOD(uart_ioctl, lpc_ns8250_bus_ioctl), KOBJMETHOD(uart_ipend, lpc_ns8250_bus_ipend), KOBJMETHOD(uart_param, lpc_ns8250_bus_param), KOBJMETHOD(uart_probe, lpc_ns8250_bus_probe), KOBJMETHOD(uart_receive, lpc_ns8250_bus_receive), KOBJMETHOD(uart_setsig, lpc_ns8250_bus_setsig), KOBJMETHOD(uart_transmit, lpc_ns8250_bus_transmit), KOBJMETHOD(uart_grab, lpc_ns8250_bus_grab), KOBJMETHOD(uart_ungrab, lpc_ns8250_bus_ungrab), { 0, 0 } }; static struct uart_class uart_lpc_class = { "lpc_ns8250", lpc_ns8250_methods, sizeof(struct lpc_ns8250_softc), .uc_ops = &uart_lpc_ns8250_ops, .uc_range = 8, .uc_rclk = DEFAULT_RCLK, .uc_rshift = 0 }; static struct ofw_compat_data compat_data[] = { {"lpc,uart", (uintptr_t)&uart_lpc_class}, {NULL, (uintptr_t)NULL}, }; UART_FDT_CLASS_AND_DEVICE(compat_data); #define SIGCHG(c, i, s, d) \ if (c) { \ i |= (i & s) ? s : s | d; \ } else { \ i = (i & s) ? 
(i & ~s) | d : i; \ } static int lpc_ns8250_bus_attach(struct uart_softc *sc) { struct lpc_ns8250_softc *lpc_ns8250 = (struct lpc_ns8250_softc*)sc; struct uart_bas *bas; unsigned int ivar; bas = &sc->sc_bas; lpc_ns8250->mcr = uart_getreg(bas, REG_MCR); lpc_ns8250->fcr = FCR_ENABLE | FCR_DMA; if (!resource_int_value("uart", device_get_unit(sc->sc_dev), "flags", &ivar)) { if (UART_FLAGS_FCR_RX_LOW(ivar)) lpc_ns8250->fcr |= FCR_RX_LOW; else if (UART_FLAGS_FCR_RX_MEDL(ivar)) lpc_ns8250->fcr |= FCR_RX_MEDL; else if (UART_FLAGS_FCR_RX_HIGH(ivar)) lpc_ns8250->fcr |= FCR_RX_HIGH; else lpc_ns8250->fcr |= FCR_RX_MEDH; } else lpc_ns8250->fcr |= FCR_RX_HIGH; /* Get IER mask */ ivar = 0xf0; resource_int_value("uart", device_get_unit(sc->sc_dev), "ier_mask", &ivar); lpc_ns8250->ier_mask = (uint8_t)(ivar & 0xff); /* Get IER RX interrupt bits */ ivar = IER_EMSC | IER_ERLS | IER_ERXRDY; resource_int_value("uart", device_get_unit(sc->sc_dev), "ier_rxbits", &ivar); lpc_ns8250->ier_rxbits = (uint8_t)(ivar & 0xff); uart_setreg(bas, REG_FCR, lpc_ns8250->fcr); uart_barrier(bas); lpc_ns8250_bus_flush(sc, UART_FLUSH_RECEIVER|UART_FLUSH_TRANSMITTER); if (lpc_ns8250->mcr & MCR_DTR) sc->sc_hwsig |= SER_DTR; if (lpc_ns8250->mcr & MCR_RTS) sc->sc_hwsig |= SER_RTS; lpc_ns8250_bus_getsig(sc); lpc_ns8250_clrint(bas); lpc_ns8250->ier = uart_getreg(bas, REG_IER) & lpc_ns8250->ier_mask; lpc_ns8250->ier |= lpc_ns8250->ier_rxbits; uart_setreg(bas, REG_IER, lpc_ns8250->ier); uart_barrier(bas); return (0); } static int lpc_ns8250_bus_detach(struct uart_softc *sc) { struct lpc_ns8250_softc *lpc_ns8250; struct uart_bas *bas; u_char ier; lpc_ns8250 = (struct lpc_ns8250_softc *)sc; bas = &sc->sc_bas; ier = uart_getreg(bas, REG_IER) & lpc_ns8250->ier_mask; uart_setreg(bas, REG_IER, ier); uart_barrier(bas); lpc_ns8250_clrint(bas); return (0); } static int lpc_ns8250_bus_flush(struct uart_softc *sc, int what) { struct lpc_ns8250_softc *lpc_ns8250 = (struct lpc_ns8250_softc*)sc; struct uart_bas *bas; int error; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); if (sc->sc_rxfifosz > 1) { lpc_ns8250_flush(bas, what); uart_setreg(bas, REG_FCR, lpc_ns8250->fcr); uart_barrier(bas); error = 0; } else error = lpc_ns8250_drain(bas, what); uart_unlock(sc->sc_hwmtx); return (error); } static int lpc_ns8250_bus_getsig(struct uart_softc *sc) { uint32_t new, old, sig; uint8_t msr; do { old = sc->sc_hwsig; sig = old; uart_lock(sc->sc_hwmtx); msr = uart_getreg(&sc->sc_bas, REG_MSR); uart_unlock(sc->sc_hwmtx); SIGCHG(msr & MSR_DSR, sig, SER_DSR, SER_DDSR); SIGCHG(msr & MSR_CTS, sig, SER_CTS, SER_DCTS); SIGCHG(msr & MSR_DCD, sig, SER_DCD, SER_DDCD); SIGCHG(msr & MSR_RI, sig, SER_RI, SER_DRI); new = sig & ~SER_MASK_DELTA; } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); return (sig); } static int lpc_ns8250_bus_ioctl(struct uart_softc *sc, int request, intptr_t data) { struct uart_bas *bas; int baudrate, divisor, error; uint8_t efr, lcr; bas = &sc->sc_bas; error = 0; uart_lock(sc->sc_hwmtx); switch (request) { case UART_IOCTL_BREAK: lcr = uart_getreg(bas, REG_LCR); if (data) lcr |= LCR_SBREAK; else lcr &= ~LCR_SBREAK; uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); break; case UART_IOCTL_IFLOW: lcr = uart_getreg(bas, REG_LCR); uart_barrier(bas); uart_setreg(bas, REG_LCR, 0xbf); uart_barrier(bas); efr = uart_getreg(bas, REG_EFR); if (data) efr |= EFR_RTS; else efr &= ~EFR_RTS; uart_setreg(bas, REG_EFR, efr); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); break; case UART_IOCTL_OFLOW: lcr = uart_getreg(bas, REG_LCR); 
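/*
 * Writing 0xbf to the LCR selects the enhanced register set on
 * 16650-compatible parts, which is what exposes REG_EFR below.
 */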
uart_barrier(bas); uart_setreg(bas, REG_LCR, 0xbf); uart_barrier(bas); efr = uart_getreg(bas, REG_EFR); if (data) efr |= EFR_CTS; else efr &= ~EFR_CTS; uart_setreg(bas, REG_EFR, efr); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); break; case UART_IOCTL_BAUD: lcr = uart_getreg(bas, REG_LCR); uart_setreg(bas, REG_LCR, lcr | LCR_DLAB); uart_barrier(bas); divisor = uart_getreg(bas, REG_DLL) | (uart_getreg(bas, REG_DLH) << 8); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); baudrate = (divisor > 0) ? bas->rclk / divisor / 16 : 0; if (baudrate > 0) *(int*)data = baudrate; else error = ENXIO; break; default: error = EINVAL; break; } uart_unlock(sc->sc_hwmtx); return (error); } static int lpc_ns8250_bus_ipend(struct uart_softc *sc) { struct uart_bas *bas; struct lpc_ns8250_softc *lpc_ns8250; int ipend; uint8_t iir, lsr; lpc_ns8250 = (struct lpc_ns8250_softc *)sc; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); iir = uart_getreg(bas, REG_IIR); if (iir & IIR_NOPEND) { uart_unlock(sc->sc_hwmtx); return (0); } ipend = 0; if (iir & IIR_RXRDY) { lsr = uart_getreg(bas, REG_LSR); if (lsr & LSR_OE) ipend |= SER_INT_OVERRUN; if (lsr & LSR_BI) ipend |= SER_INT_BREAK; if (lsr & LSR_RXRDY) ipend |= SER_INT_RXREADY; } else { if (iir & IIR_TXRDY) { ipend |= SER_INT_TXIDLE; uart_setreg(bas, REG_IER, lpc_ns8250->ier); uart_barrier(bas); } else ipend |= SER_INT_SIGCHG; } if (ipend == 0) lpc_ns8250_clrint(bas); uart_unlock(sc->sc_hwmtx); return (ipend); } static int lpc_ns8250_bus_param(struct uart_softc *sc, int baudrate, int databits, int stopbits, int parity) { struct uart_bas *bas; int error; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); error = lpc_ns8250_param(bas, baudrate, databits, stopbits, parity); uart_unlock(sc->sc_hwmtx); return (error); } static int lpc_ns8250_bus_probe(struct uart_softc *sc) { struct lpc_ns8250_softc *lpc_ns8250; struct uart_bas *bas; int count, delay, error, limit; uint8_t lsr, mcr, ier; lpc_ns8250 = (struct lpc_ns8250_softc *)sc; bas = &sc->sc_bas; error = lpc_ns8250_probe(bas); if (error) return (error); mcr = MCR_IE; if (sc->sc_sysdev == NULL) { /* By using lpc_ns8250_init() we also set DTR and RTS. */ lpc_ns8250_init(bas, 115200, 8, 1, UART_PARITY_NONE); } else mcr |= MCR_DTR | MCR_RTS; error = lpc_ns8250_drain(bas, UART_DRAIN_TRANSMITTER); if (error) return (error); /* * Set loopback mode. This avoids having garbage on the wire and * also allows us send and receive data. We set DTR and RTS to * avoid the possibility that automatic flow-control prevents * any data from being sent. */ uart_setreg(bas, REG_MCR, MCR_LOOPBACK | MCR_IE | MCR_DTR | MCR_RTS); uart_barrier(bas); /* * Enable FIFOs. And check that the UART has them. If not, we're * done. Since this is the first time we enable the FIFOs, we reset * them. */ uart_setreg(bas, REG_FCR, FCR_ENABLE); uart_barrier(bas); if (!(uart_getreg(bas, REG_IIR) & IIR_FIFO_MASK)) { /* * NS16450 or INS8250. We don't bother to differentiate * between them. They're too old to be interesting. */ uart_setreg(bas, REG_MCR, mcr); uart_barrier(bas); sc->sc_rxfifosz = sc->sc_txfifosz = 1; device_set_desc(sc->sc_dev, "8250 or 16450 or compatible"); return (0); } uart_setreg(bas, REG_FCR, FCR_ENABLE | FCR_XMT_RST | FCR_RCV_RST); uart_barrier(bas); count = 0; delay = lpc_ns8250_delay(bas); /* We have FIFOs. Drain the transmitter and receiver. 
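 * This gives the FIFO size probe below a clean starting point.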
*/ error = lpc_ns8250_drain(bas, UART_DRAIN_RECEIVER|UART_DRAIN_TRANSMITTER); if (error) { uart_setreg(bas, REG_MCR, mcr); uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); goto done; } /* * We should have a sufficiently clean "pipe" to determine the * size of the FIFOs. We send as much characters as is reasonable * and wait for the overflow bit in the LSR register to be * asserted, counting the characters as we send them. Based on * that count we know the FIFO size. */ do { uart_setreg(bas, REG_DATA, 0); uart_barrier(bas); count++; limit = 30; lsr = 0; /* * LSR bits are cleared upon read, so we must accumulate * them to be able to test LSR_OE below. */ while (((lsr |= uart_getreg(bas, REG_LSR)) & LSR_TEMT) == 0 && --limit) DELAY(delay); if (limit == 0) { ier = uart_getreg(bas, REG_IER) & lpc_ns8250->ier_mask; uart_setreg(bas, REG_IER, ier); uart_setreg(bas, REG_MCR, mcr); uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); count = 0; goto done; } } while ((lsr & LSR_OE) == 0 && count < 130); count--; uart_setreg(bas, REG_MCR, mcr); /* Reset FIFOs. */ lpc_ns8250_flush(bas, UART_FLUSH_RECEIVER|UART_FLUSH_TRANSMITTER); done: sc->sc_rxfifosz = 64; device_set_desc(sc->sc_dev, "LPC32x0 UART with FIFOs"); /* * Force the Tx FIFO size to 16 bytes for now. We don't program the * Tx trigger. Also, we assume that all data has been sent when the * interrupt happens. */ sc->sc_txfifosz = 16; #if 0 /* * XXX there are some issues related to hardware flow control and * it's likely that uart(4) is the cause. This basicly needs more * investigation, but we avoid using for hardware flow control * until then. */ /* 16650s or higher have automatic flow control. */ if (sc->sc_rxfifosz > 16) { sc->sc_hwiflow = 1; sc->sc_hwoflow = 1; } #endif return (0); } static int lpc_ns8250_bus_receive(struct uart_softc *sc) { struct uart_bas *bas; int xc; uint8_t lsr; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); lsr = uart_getreg(bas, REG_LSR); while (lsr & LSR_RXRDY) { if (uart_rx_full(sc)) { sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN; break; } xc = uart_getreg(bas, REG_DATA); if (lsr & LSR_FE) xc |= UART_STAT_FRAMERR; if (lsr & LSR_PE) xc |= UART_STAT_PARERR; uart_rx_put(sc, xc); lsr = uart_getreg(bas, REG_LSR); } /* Discard everything left in the Rx FIFO. 
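 * (Data is only left over here when the software receive buffer filled up
 * above.)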
*/ while (lsr & LSR_RXRDY) { (void)uart_getreg(bas, REG_DATA); uart_barrier(bas); lsr = uart_getreg(bas, REG_LSR); } uart_unlock(sc->sc_hwmtx); return (0); } static int lpc_ns8250_bus_setsig(struct uart_softc *sc, int sig) { struct lpc_ns8250_softc *lpc_ns8250 = (struct lpc_ns8250_softc*)sc; struct uart_bas *bas; uint32_t new, old; bas = &sc->sc_bas; do { old = sc->sc_hwsig; new = old; if (sig & SER_DDTR) { SIGCHG(sig & SER_DTR, new, SER_DTR, SER_DDTR); } if (sig & SER_DRTS) { SIGCHG(sig & SER_RTS, new, SER_RTS, SER_DRTS); } } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); uart_lock(sc->sc_hwmtx); lpc_ns8250->mcr &= ~(MCR_DTR|MCR_RTS); if (new & SER_DTR) lpc_ns8250->mcr |= MCR_DTR; if (new & SER_RTS) lpc_ns8250->mcr |= MCR_RTS; uart_setreg(bas, REG_MCR, lpc_ns8250->mcr); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); return (0); } static int lpc_ns8250_bus_transmit(struct uart_softc *sc) { struct lpc_ns8250_softc *lpc_ns8250 = (struct lpc_ns8250_softc*)sc; struct uart_bas *bas; int i; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); while ((uart_getreg(bas, REG_LSR) & LSR_THRE) == 0) ; for (i = 0; i < sc->sc_txdatasz; i++) { uart_setreg(bas, REG_DATA, sc->sc_txbuf[i]); uart_barrier(bas); } uart_setreg(bas, REG_IER, lpc_ns8250->ier | IER_ETXRDY); uart_barrier(bas); sc->sc_txbusy = 1; uart_unlock(sc->sc_hwmtx); return (0); } void lpc_ns8250_bus_grab(struct uart_softc *sc) { struct uart_bas *bas = &sc->sc_bas; /* * turn off all interrupts to enter polling mode. Leave the * saved mask alone. We'll restore whatever it was in ungrab. * All pending interupt signals are reset when IER is set to 0. */ uart_lock(sc->sc_hwmtx); uart_setreg(bas, REG_IER, 0); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); } void lpc_ns8250_bus_ungrab(struct uart_softc *sc) { struct lpc_ns8250_softc *lpc_ns8250 = (struct lpc_ns8250_softc*)sc; struct uart_bas *bas = &sc->sc_bas; /* * Restore previous interrupt mask */ uart_lock(sc->sc_hwmtx); uart_setreg(bas, REG_IER, lpc_ns8250->ier); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); } Index: projects/release-pkg/sys/dev/usb/usb_hid.c =================================================================== --- projects/release-pkg/sys/dev/usb/usb_hid.c (revision 295925) +++ projects/release-pkg/sys/dev/usb/usb_hid.c (revision 295926) @@ -1,924 +1,925 @@ /* $FreeBSD$ */ /* $NetBSD: hid.c,v 1.17 2001/11/13 06:24:53 lukem Exp $ */ /*- * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR usb_debug #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ static void hid_clear_local(struct hid_item *); static uint8_t hid_get_byte(struct hid_data *s, const uint16_t wSize); #define MAXUSAGE 64 #define MAXPUSH 4 #define MAXID 16 struct hid_pos_data { int32_t rid; uint32_t pos; }; struct hid_data { const uint8_t *start; const uint8_t *end; const uint8_t *p; struct hid_item cur[MAXPUSH]; struct hid_pos_data last_pos[MAXID]; int32_t usages_min[MAXUSAGE]; int32_t usages_max[MAXUSAGE]; int32_t usage_last; /* last seen usage */ uint32_t loc_size; /* last seen size */ uint32_t loc_count; /* last seen count */ uint8_t kindset; /* we have 5 kinds so 8 bits are enough */ uint8_t pushlevel; /* current pushlevel */ uint8_t ncount; /* end usage item count */ uint8_t icount; /* current usage item count */ uint8_t nusage; /* end "usages_min/max" index */ uint8_t iusage; /* current "usages_min/max" index */ uint8_t ousage; /* current "usages_min/max" offset */ uint8_t susage; /* usage set flags */ }; /*------------------------------------------------------------------------* * hid_clear_local *------------------------------------------------------------------------*/ static void hid_clear_local(struct hid_item *c) { c->loc.count = 0; c->loc.size = 0; c->usage = 0; c->usage_minimum = 0; c->usage_maximum = 0; c->designator_index = 0; c->designator_minimum = 0; c->designator_maximum = 0; c->string_index = 0; c->string_minimum = 0; c->string_maximum = 0; c->set_delimiter = 0; } static void hid_switch_rid(struct hid_data *s, struct hid_item *c, int32_t next_rID) { uint8_t i; /* check for same report ID - optimise */ if (c->report_ID == next_rID) return; /* save current position for current rID */ if (c->report_ID == 0) { i = 0; } else { for (i = 1; i != MAXID; i++) { if (s->last_pos[i].rid == c->report_ID) break; if (s->last_pos[i].rid == 0) break; } } if (i != MAXID) { s->last_pos[i].rid = c->report_ID; s->last_pos[i].pos = c->loc.pos; } /* store next report ID */ c->report_ID = next_rID; /* lookup last position for next rID */ if (next_rID == 0) { i = 0; } else { for (i = 1; i != MAXID; i++) { if (s->last_pos[i].rid == next_rID) break; if (s->last_pos[i].rid == 0) break; } } if (i != MAXID) { s->last_pos[i].rid = next_rID; c->loc.pos = s->last_pos[i].pos; } else { DPRINTF("Out of RID entries, position is set to zero!\n"); c->loc.pos = 0; } } /*------------------------------------------------------------------------* * hid_start_parse *------------------------------------------------------------------------*/ struct hid_data * hid_start_parse(const void *d, usb_size_t len, int kindset) { struct hid_data *s; if ((kindset-1) & kindset) { DPRINTFN(0, "Only one bit can be " 
"set in the kindset\n"); return (NULL); } s = malloc(sizeof *s, M_TEMP, M_WAITOK | M_ZERO); s->start = s->p = d; s->end = ((const uint8_t *)d) + len; s->kindset = kindset; return (s); } /*------------------------------------------------------------------------* * hid_end_parse *------------------------------------------------------------------------*/ void hid_end_parse(struct hid_data *s) { if (s == NULL) return; free(s, M_TEMP); } /*------------------------------------------------------------------------* * get byte from HID descriptor *------------------------------------------------------------------------*/ static uint8_t hid_get_byte(struct hid_data *s, const uint16_t wSize) { const uint8_t *ptr; uint8_t retval; ptr = s->p; /* check if end is reached */ if (ptr == s->end) return (0); /* read out a byte */ retval = *ptr; /* check if data pointer can be advanced by "wSize" bytes */ if ((s->end - ptr) < wSize) ptr = s->end; else ptr += wSize; /* update pointer */ s->p = ptr; return (retval); } /*------------------------------------------------------------------------* * hid_get_item *------------------------------------------------------------------------*/ int hid_get_item(struct hid_data *s, struct hid_item *h) { struct hid_item *c; unsigned int bTag, bType, bSize; uint32_t oldpos; int32_t mask; int32_t dval; if (s == NULL) return (0); c = &s->cur[s->pushlevel]; top: /* check if there is an array of items */ if (s->icount < s->ncount) { /* get current usage */ if (s->iusage < s->nusage) { dval = s->usages_min[s->iusage] + s->ousage; c->usage = dval; s->usage_last = dval; if (dval == s->usages_max[s->iusage]) { s->iusage ++; s->ousage = 0; } else { s->ousage ++; } } else { DPRINTFN(1, "Using last usage\n"); dval = s->usage_last; } s->icount ++; /* * Only copy HID item, increment position and return * if correct kindset! 
*/ if (s->kindset & (1 << c->kind)) { *h = *c; DPRINTFN(1, "%u,%u,%u\n", h->loc.pos, h->loc.size, h->loc.count); c->loc.pos += c->loc.size * c->loc.count; return (1); } } /* reset state variables */ s->icount = 0; s->ncount = 0; s->iusage = 0; s->nusage = 0; s->susage = 0; s->ousage = 0; hid_clear_local(c); /* get next item */ while (s->p != s->end) { bSize = hid_get_byte(s, 1); if (bSize == 0xfe) { /* long item */ bSize = hid_get_byte(s, 1); bSize |= hid_get_byte(s, 1) << 8; bTag = hid_get_byte(s, 1); bType = 0xff; /* XXX what should it be */ } else { /* short item */ bTag = bSize >> 4; bType = (bSize >> 2) & 3; bSize &= 3; if (bSize == 3) bSize = 4; } switch (bSize) { case 0: dval = 0; mask = 0; break; case 1: dval = (int8_t)hid_get_byte(s, 1); mask = 0xFF; break; case 2: dval = hid_get_byte(s, 1); dval |= hid_get_byte(s, 1) << 8; dval = (int16_t)dval; mask = 0xFFFF; break; case 4: dval = hid_get_byte(s, 1); dval |= hid_get_byte(s, 1) << 8; dval |= hid_get_byte(s, 1) << 16; dval |= hid_get_byte(s, 1) << 24; mask = 0xFFFFFFFF; break; default: dval = hid_get_byte(s, bSize); DPRINTFN(0, "bad length %u (data=0x%02x)\n", bSize, dval); continue; } switch (bType) { case 0: /* Main */ switch (bTag) { case 8: /* Input */ c->kind = hid_input; c->flags = dval; ret: c->loc.count = s->loc_count; c->loc.size = s->loc_size; if (c->flags & HIO_VARIABLE) { /* range check usage count */ if (c->loc.count > 255) { DPRINTFN(0, "Number of " - "items truncated to 255\n"); + "items(%u) truncated to 255\n", + (unsigned)(c->loc.count)); s->ncount = 255; } else s->ncount = c->loc.count; /* * The "top" loop will return * one and one item: */ c->loc.count = 1; } else { s->ncount = 1; } goto top; case 9: /* Output */ c->kind = hid_output; c->flags = dval; goto ret; case 10: /* Collection */ c->kind = hid_collection; c->collection = dval; c->collevel++; c->usage = s->usage_last; *h = *c; return (1); case 11: /* Feature */ c->kind = hid_feature; c->flags = dval; goto ret; case 12: /* End collection */ c->kind = hid_endcollection; if (c->collevel == 0) { DPRINTFN(0, "invalid end collection\n"); return (0); } c->collevel--; *h = *c; return (1); default: DPRINTFN(0, "Main bTag=%d\n", bTag); break; } break; case 1: /* Global */ switch (bTag) { case 0: c->_usage_page = dval << 16; break; case 1: c->logical_minimum = dval; break; case 2: c->logical_maximum = dval; break; case 3: c->physical_minimum = dval; break; case 4: c->physical_maximum = dval; break; case 5: c->unit_exponent = dval; break; case 6: c->unit = dval; break; case 7: /* mask because value is unsigned */ s->loc_size = dval & mask; break; case 8: hid_switch_rid(s, c, dval & mask); break; case 9: /* mask because value is unsigned */ s->loc_count = dval & mask; break; case 10: /* Push */ s->pushlevel ++; if (s->pushlevel < MAXPUSH) { s->cur[s->pushlevel] = *c; /* store size and count */ c->loc.size = s->loc_size; c->loc.count = s->loc_count; /* update current item pointer */ c = &s->cur[s->pushlevel]; } else { DPRINTFN(0, "Cannot push " "item @ %d\n", s->pushlevel); } break; case 11: /* Pop */ s->pushlevel --; if (s->pushlevel < MAXPUSH) { /* preserve position */ oldpos = c->loc.pos; c = &s->cur[s->pushlevel]; /* restore size and count */ s->loc_size = c->loc.size; s->loc_count = c->loc.count; /* set default item location */ c->loc.pos = oldpos; c->loc.size = 0; c->loc.count = 0; } else { DPRINTFN(0, "Cannot pop " "item @ %d\n", s->pushlevel); } break; default: DPRINTFN(0, "Global bTag=%d\n", bTag); break; } break; case 2: /* Local */ switch (bTag) { case 0: if 
(bSize != 4) dval = (dval & mask) | c->_usage_page; /* set last usage, in case of a collection */ s->usage_last = dval; if (s->nusage < MAXUSAGE) { s->usages_min[s->nusage] = dval; s->usages_max[s->nusage] = dval; s->nusage ++; } else { DPRINTFN(0, "max usage reached\n"); } /* clear any pending usage sets */ s->susage = 0; break; case 1: s->susage |= 1; if (bSize != 4) dval = (dval & mask) | c->_usage_page; c->usage_minimum = dval; goto check_set; case 2: s->susage |= 2; if (bSize != 4) dval = (dval & mask) | c->_usage_page; c->usage_maximum = dval; check_set: if (s->susage != 3) break; /* sanity check */ if ((s->nusage < MAXUSAGE) && (c->usage_minimum <= c->usage_maximum)) { /* add usage range */ s->usages_min[s->nusage] = c->usage_minimum; s->usages_max[s->nusage] = c->usage_maximum; s->nusage ++; } else { DPRINTFN(0, "Usage set dropped\n"); } s->susage = 0; break; case 3: c->designator_index = dval; break; case 4: c->designator_minimum = dval; break; case 5: c->designator_maximum = dval; break; case 7: c->string_index = dval; break; case 8: c->string_minimum = dval; break; case 9: c->string_maximum = dval; break; case 10: c->set_delimiter = dval; break; default: DPRINTFN(0, "Local bTag=%d\n", bTag); break; } break; default: DPRINTFN(0, "default bType=%d\n", bType); break; } } return (0); } /*------------------------------------------------------------------------* * hid_report_size *------------------------------------------------------------------------*/ int hid_report_size(const void *buf, usb_size_t len, enum hid_kind k, uint8_t *id) { struct hid_data *d; struct hid_item h; uint32_t temp; uint32_t hpos; uint32_t lpos; uint8_t any_id; any_id = 0; hpos = 0; lpos = 0xFFFFFFFF; for (d = hid_start_parse(buf, len, 1 << k); hid_get_item(d, &h);) { if (h.kind == k) { /* check for ID-byte presense */ if ((h.report_ID != 0) && !any_id) { if (id != NULL) *id = h.report_ID; any_id = 1; } /* compute minimum */ if (lpos > h.loc.pos) lpos = h.loc.pos; /* compute end position */ temp = h.loc.pos + (h.loc.size * h.loc.count); /* compute maximum */ if (hpos < temp) hpos = temp; } } hid_end_parse(d); /* safety check - can happen in case of currupt descriptors */ if (lpos > hpos) temp = 0; else temp = hpos - lpos; /* check for ID byte */ if (any_id) temp += 8; else if (id != NULL) *id = 0; /* return length in bytes rounded up */ return ((temp + 7) / 8); } /*------------------------------------------------------------------------* * hid_locate *------------------------------------------------------------------------*/ int hid_locate(const void *desc, usb_size_t size, int32_t u, enum hid_kind k, uint8_t index, struct hid_location *loc, uint32_t *flags, uint8_t *id) { struct hid_data *d; struct hid_item h; for (d = hid_start_parse(desc, size, 1 << k); hid_get_item(d, &h);) { if (h.kind == k && !(h.flags & HIO_CONST) && h.usage == u) { if (index--) continue; if (loc != NULL) *loc = h.loc; if (flags != NULL) *flags = h.flags; if (id != NULL) *id = h.report_ID; hid_end_parse(d); return (1); } } if (loc != NULL) loc->size = 0; if (flags != NULL) *flags = 0; if (id != NULL) *id = 0; hid_end_parse(d); return (0); } /*------------------------------------------------------------------------* * hid_get_data *------------------------------------------------------------------------*/ static uint32_t hid_get_data_sub(const uint8_t *buf, usb_size_t len, struct hid_location *loc, int is_signed) { uint32_t hpos = loc->pos; uint32_t hsize = loc->size; uint32_t data; uint32_t rpos; uint8_t n; DPRINTFN(11, "hid_get_data: 
loc %d/%d\n", hpos, hsize); /* Range check and limit */ if (hsize == 0) return (0); if (hsize > 32) hsize = 32; /* Get data in a safe way */ data = 0; rpos = (hpos / 8); n = (hsize + 7) / 8; rpos += n; while (n--) { rpos--; if (rpos < len) data |= buf[rpos] << (8 * n); } /* Correctly shift down data */ data = (data >> (hpos % 8)); n = 32 - hsize; /* Mask and sign extend in one */ if (is_signed != 0) data = (int32_t)((int32_t)data << n) >> n; else data = (uint32_t)((uint32_t)data << n) >> n; DPRINTFN(11, "hid_get_data: loc %d/%d = %lu\n", loc->pos, loc->size, (long)data); return (data); } int32_t hid_get_data(const uint8_t *buf, usb_size_t len, struct hid_location *loc) { return (hid_get_data_sub(buf, len, loc, 1)); } uint32_t hid_get_data_unsigned(const uint8_t *buf, usb_size_t len, struct hid_location *loc) { return (hid_get_data_sub(buf, len, loc, 0)); } /*------------------------------------------------------------------------* * hid_put_data *------------------------------------------------------------------------*/ void hid_put_data_unsigned(uint8_t *buf, usb_size_t len, struct hid_location *loc, unsigned int value) { uint32_t hpos = loc->pos; uint32_t hsize = loc->size; uint64_t data; uint64_t mask; uint32_t rpos; uint8_t n; DPRINTFN(11, "hid_put_data: loc %d/%d = %u\n", hpos, hsize, value); /* Range check and limit */ if (hsize == 0) return; if (hsize > 32) hsize = 32; /* Put data in a safe way */ rpos = (hpos / 8); n = (hsize + 7) / 8; data = ((uint64_t)value) << (hpos % 8); mask = ((1ULL << hsize) - 1ULL) << (hpos % 8); rpos += n; while (n--) { rpos--; if (rpos < len) { buf[rpos] &= ~(mask >> (8 * n)); buf[rpos] |= (data >> (8 * n)); } } } /*------------------------------------------------------------------------* * hid_is_collection *------------------------------------------------------------------------*/ int hid_is_collection(const void *desc, usb_size_t size, int32_t usage) { struct hid_data *hd; struct hid_item hi; int err; hd = hid_start_parse(desc, size, hid_input); if (hd == NULL) return (0); while ((err = hid_get_item(hd, &hi))) { if (hi.kind == hid_collection && hi.usage == usage) break; } hid_end_parse(hd); return (err); } /*------------------------------------------------------------------------* * hid_get_descriptor_from_usb * * This function will search for a HID descriptor between two USB * interface descriptors. * * Return values: * NULL: No more HID descriptors. * Else: Pointer to HID descriptor. *------------------------------------------------------------------------*/ struct usb_hid_descriptor * hid_get_descriptor_from_usb(struct usb_config_descriptor *cd, struct usb_interface_descriptor *id) { struct usb_descriptor *desc = (void *)id; if (desc == NULL) { return (NULL); } while ((desc = usb_desc_foreach(cd, desc))) { if ((desc->bDescriptorType == UDESC_HID) && (desc->bLength >= USB_HID_DESCRIPTOR_SIZE(0))) { return (void *)desc; } if (desc->bDescriptorType == UDESC_INTERFACE) { break; } } return (NULL); } /*------------------------------------------------------------------------* * usbd_req_get_hid_desc * * This function will read out an USB report descriptor from the USB * device. * * Return values: * NULL: Failure. * Else: Success. The pointer should eventually be passed to free(). 
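[Editorial note: the comment above spells out the contract of usbd_req_get_hid_desc(): on success it hands back an allocated copy of the report descriptor that the caller must eventually free(). Combined with hid_report_size(), hid_locate() and hid_get_data() defined earlier in this file, a typical consumer looks roughly like the hypothetical fragment below. example_read_x_axis is an invented name, error handling is trimmed, and the Generic Desktop / X usage is only an example.]

	static void
	example_read_x_axis(struct usb_device *udev, uint8_t iface_index,
	    const uint8_t *report, usb_size_t report_len)
	{
		struct hid_location loc;
		uint32_t flags;
		void *desc = NULL;
		uint16_t desc_len = 0;
		uint8_t id = 0;

		if (usbd_req_get_hid_desc(udev, NULL, &desc, &desc_len,
		    M_TEMP, iface_index) != USB_ERR_NORMAL_COMPLETION)
			return;

		/* bytes per input report, including the report ID byte, if any */
		DPRINTF("input report size = %d bytes\n",
		    hid_report_size(desc, desc_len, hid_input, &id));

		if (hid_locate(desc, desc_len,
		    HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_X),
		    hid_input, 0, &loc, &flags, &id)) {
			/* hid_get_data() masks and sign-extends the located field */
			DPRINTF("X = %d\n",
			    (int)hid_get_data(report, report_len, &loc));
		}

		/* the descriptor was allocated on our behalf */
		free(desc, M_TEMP);
	}
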
*------------------------------------------------------------------------*/ usb_error_t usbd_req_get_hid_desc(struct usb_device *udev, struct mtx *mtx, void **descp, uint16_t *sizep, struct malloc_type *mem, uint8_t iface_index) { struct usb_interface *iface = usbd_get_iface(udev, iface_index); struct usb_hid_descriptor *hid; usb_error_t err; if ((iface == NULL) || (iface->idesc == NULL)) { return (USB_ERR_INVAL); } hid = hid_get_descriptor_from_usb (usbd_get_config_descriptor(udev), iface->idesc); if (hid == NULL) { return (USB_ERR_IOERROR); } *sizep = UGETW(hid->descrs[0].wDescriptorLength); if (*sizep == 0) { return (USB_ERR_IOERROR); } if (mtx) mtx_unlock(mtx); *descp = malloc(*sizep, mem, M_ZERO | M_WAITOK); if (mtx) mtx_lock(mtx); if (*descp == NULL) { return (USB_ERR_NOMEM); } err = usbd_req_get_report_descriptor (udev, mtx, *descp, *sizep, iface_index); if (err) { free(*descp, mem); *descp = NULL; return (err); } return (USB_ERR_NORMAL_COMPLETION); } /*------------------------------------------------------------------------* * hid_is_mouse * * This function will decide if a USB descriptor belongs to a USB mouse. * * Return values: * Zero: Not a USB mouse. * Else: Is a USB mouse. *------------------------------------------------------------------------*/ int hid_is_mouse(const void *d_ptr, uint16_t d_len) { struct hid_data *hd; struct hid_item hi; int mdepth; int found; hd = hid_start_parse(d_ptr, d_len, 1 << hid_input); if (hd == NULL) return (0); mdepth = 0; found = 0; while (hid_get_item(hd, &hi)) { switch (hi.kind) { case hid_collection: if (mdepth != 0) mdepth++; else if (hi.collection == 1 && hi.usage == HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_MOUSE)) mdepth++; break; case hid_endcollection: if (mdepth != 0) mdepth--; break; case hid_input: if (mdepth == 0) break; if (hi.usage == HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_X) && (hi.flags & (HIO_CONST|HIO_RELATIVE)) == HIO_RELATIVE) found++; if (hi.usage == HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_Y) && (hi.flags & (HIO_CONST|HIO_RELATIVE)) == HIO_RELATIVE) found++; break; default: break; } } hid_end_parse(hd); return (found); } /*------------------------------------------------------------------------* * hid_is_keyboard * * This function will decide if a USB descriptor belongs to a USB keyboard. * * Return values: * Zero: Not a USB keyboard. * Else: Is a USB keyboard. *------------------------------------------------------------------------*/ int hid_is_keyboard(const void *d_ptr, uint16_t d_len) { if (hid_is_collection(d_ptr, d_len, HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_KEYBOARD))) return (1); return (0); } Index: projects/release-pkg/sys/dev/usb/usbdevs =================================================================== --- projects/release-pkg/sys/dev/usb/usbdevs (revision 295925) +++ projects/release-pkg/sys/dev/usb/usbdevs (revision 295926) @@ -1,4700 +1,4701 @@ $FreeBSD$ /* $NetBSD: usbdevs,v 1.392 2004/12/29 08:38:44 imp Exp $ */ /*- * Copyright (c) 1998-2004 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * List of known USB vendors * * USB.org publishes a VID list of USB-IF member companies at * http://www.usb.org/developers/tools * Note that it does not show companies that have obtained a Vendor ID * without becoming full members. * * Please note that these IDs do not do anything. Adding an ID here and * regenerating the usbdevs.h and usbdevs_data.h only makes a symbolic name * available to the source code and does not change any functionality, nor * does it make your device available to a specific driver. * It will however make the descriptive string available if a device does not * provide the string itself. * * After adding a vendor ID VNDR and a product ID PRDCT you will have the * following extra defines: * #define USB_VENDOR_VNDR 0x???? * #define USB_PRODUCT_VNDR_PRDCT 0x???? * * You may have to add these defines to the respective probe routines to * make the device recognised by the appropriate device driver. */ vendor UNKNOWN1 0x0053 Unknown vendor vendor UNKNOWN2 0x0105 Unknown vendor vendor EGALAX2 0x0123 eGalax, Inc. vendor CHIPSBANK 0x0204 Chipsbank Microelectronics Co. 
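[Editorial note: as the comment block above stresses, entries in this file only generate symbolic names: regenerating usbdevs.h and usbdevs_data.h turns every vendor/product line into a USB_VENDOR_* and USB_PRODUCT_*_* define plus a description string. For the entry this revision adds further down (+product ABOCOM RTL8188EU 0x8179 RTL8188EU, under the AboCom vendor 0x07b8 listed below), the generated names would look roughly as follows; the exact layout of the generated header may differ.]

	/* sketch of the usbdevs.h output for the entry added by this revision */
	#define	USB_VENDOR_ABOCOM		0x07b8	/* AboCom Systems */
	#define	USB_PRODUCT_ABOCOM_RTL8188EU	0x8179	/* RTL8188EU */

A driver's probe routine can then match on this vendor/product pair by name instead of repeating the raw hex IDs, which is what the note above about adding the defines to the respective probe routines refers to.
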
vendor HUMAX 0x02ad HUMAX vendor LTS 0x0386 LTS vendor BWCT 0x03da Bernd Walter Computer Technology vendor AOX 0x03e8 AOX vendor THESYS 0x03e9 Thesys vendor DATABROADCAST 0x03ea Data Broadcasting vendor ATMEL 0x03eb Atmel vendor IWATSU 0x03ec Iwatsu America vendor MITSUMI 0x03ee Mitsumi vendor HP 0x03f0 Hewlett Packard vendor GENOA 0x03f1 Genoa vendor OAK 0x03f2 Oak vendor ADAPTEC 0x03f3 Adaptec vendor DIEBOLD 0x03f4 Diebold vendor SIEMENSELECTRO 0x03f5 Siemens Electromechanical vendor EPSONIMAGING 0x03f8 Epson Imaging vendor KEYTRONIC 0x03f9 KeyTronic vendor OPTI 0x03fb OPTi vendor ELITEGROUP 0x03fc Elitegroup vendor XILINX 0x03fd Xilinx vendor FARALLON 0x03fe Farallon Communications vendor NATIONAL 0x0400 National Semiconductor vendor NATIONALREG 0x0401 National Registry vendor ACERLABS 0x0402 Acer Labs vendor FTDI 0x0403 Future Technology Devices vendor NCR 0x0404 NCR vendor SYNOPSYS2 0x0405 Synopsys vendor FUJITSUICL 0x0406 Fujitsu-ICL vendor FUJITSU2 0x0407 Fujitsu Personal Systems vendor QUANTA 0x0408 Quanta vendor NEC 0x0409 NEC vendor KODAK 0x040a Eastman Kodak vendor WELTREND 0x040b Weltrend vendor VIA 0x040d VIA vendor MCCI 0x040e MCCI vendor MELCO 0x0411 Melco vendor LEADTEK 0x0413 Leadtek vendor WINBOND 0x0416 Winbond vendor PHOENIX 0x041a Phoenix vendor CREATIVE 0x041e Creative Labs vendor NOKIA 0x0421 Nokia vendor ADI 0x0422 ADI Systems vendor CATC 0x0423 Computer Access Technology vendor SMC2 0x0424 Standard Microsystems vendor MOTOROLA_HK 0x0425 Motorola HK vendor GRAVIS 0x0428 Advanced Gravis Computer vendor CIRRUSLOGIC 0x0429 Cirrus Logic vendor INNOVATIVE 0x042c Innovative Semiconductors vendor MOLEX 0x042f Molex vendor SUN 0x0430 Sun Microsystems vendor UNISYS 0x0432 Unisys vendor TAUGA 0x0436 Taugagreining HF vendor AMD 0x0438 Advanced Micro Devices vendor LEXMARK 0x043d Lexmark International vendor LG 0x043e LG Electronics vendor NANAO 0x0440 NANAO vendor GATEWAY 0x0443 Gateway 2000 vendor NMB 0x0446 NMB vendor ALPS 0x044e Alps Electric vendor THRUST 0x044f Thrustmaster vendor TI 0x0451 Texas Instruments vendor ANALOGDEVICES 0x0456 Analog Devices vendor SIS 0x0457 Silicon Integrated Systems Corp. vendor KYE 0x0458 KYE Systems vendor DIAMOND2 0x045a Diamond (Supra) vendor RENESAS 0x045b Renesas vendor MICROSOFT 0x045e Microsoft vendor PRIMAX 0x0461 Primax Electronics vendor MGE 0x0463 MGE UPS Systems vendor AMP 0x0464 AMP vendor CHERRY 0x046a Cherry Mikroschalter vendor MEGATRENDS 0x046b American Megatrends vendor LOGITECH 0x046d Logitech vendor BTC 0x046e Behavior Tech. Computer vendor PHILIPS 0x0471 Philips vendor SUN2 0x0472 Sun Microsystems (offical) vendor SANYO 0x0474 Sanyo Electric vendor SEAGATE 0x0477 Seagate vendor CONNECTIX 0x0478 Connectix vendor SEMTECH 0x047a Semtech vendor KENSINGTON 0x047d Kensington vendor LUCENT 0x047e Lucent vendor PLANTRONICS 0x047f Plantronics vendor KYOCERA 0x0482 Kyocera Wireless Corp. vendor STMICRO 0x0483 STMicroelectronics vendor FOXCONN 0x0489 Foxconn vendor MEIZU 0x0492 Meizu Electronics vendor YAMAHA 0x0499 YAMAHA vendor COMPAQ 0x049f Compaq vendor HITACHI 0x04a4 Hitachi vendor ACERP 0x04a5 Acer Peripherals vendor DAVICOM 0x04a6 Davicom vendor VISIONEER 0x04a7 Visioneer vendor CANON 0x04a9 Canon vendor NIKON 0x04b0 Nikon vendor PAN 0x04b1 Pan International vendor IBM 0x04b3 IBM vendor CYPRESS 0x04b4 Cypress Semiconductor vendor ROHM 0x04b5 ROHM vendor COMPAL 0x04b7 Compal vendor EPSON 0x04b8 Seiko Epson vendor RAINBOW 0x04b9 Rainbow Technologies vendor IODATA 0x04bb I-O Data vendor TDK 0x04bf TDK vendor 3COMUSR 0x04c1 U.S. 
Robotics vendor METHODE 0x04c2 Methode Electronics Far East vendor MAXISWITCH 0x04c3 Maxi Switch vendor LOCKHEEDMER 0x04c4 Lockheed Martin Energy Research vendor FUJITSU 0x04c5 Fujitsu vendor TOSHIBAAM 0x04c6 Toshiba America vendor MICROMACRO 0x04c7 Micro Macro Technologies vendor KONICA 0x04c8 Konica vendor LITEON 0x04ca Lite-On Technology vendor FUJIPHOTO 0x04cb Fuji Photo Film vendor PHILIPSSEMI 0x04cc Philips Semiconductors vendor TATUNG 0x04cd Tatung Co. Of America vendor SCANLOGIC 0x04ce ScanLogic vendor MYSON 0x04cf Myson Technology vendor DIGI2 0x04d0 Digi vendor ITTCANON 0x04d1 ITT Canon vendor ALTEC 0x04d2 Altec Lansing vendor LSI 0x04d4 LSI vendor MENTORGRAPHICS 0x04d6 Mentor Graphics vendor ITUNERNET 0x04d8 I-Tuner Networks vendor HOLTEK 0x04d9 Holtek Semiconductor, Inc. vendor PANASONIC 0x04da Panasonic (Matsushita) vendor HUANHSIN 0x04dc Huan Hsin vendor SHARP 0x04dd Sharp vendor IIYAMA 0x04e1 Iiyama vendor SHUTTLE 0x04e6 Shuttle Technology vendor ELO 0x04e7 Elo TouchSystems vendor SAMSUNG 0x04e8 Samsung Electronics vendor NORTHSTAR 0x04eb Northstar vendor TOKYOELECTRON 0x04ec Tokyo Electron vendor ANNABOOKS 0x04ed Annabooks vendor JVC 0x04f1 JVC vendor CHICONY 0x04f2 Chicony Electronics vendor ELAN 0x04f3 Elan vendor NEWNEX 0x04f7 Newnex vendor BROTHER 0x04f9 Brother Industries vendor DALLAS 0x04fa Dallas Semiconductor vendor AIPTEK2 0x04fc AIPTEK International vendor PFU 0x04fe PFU vendor FUJIKURA 0x0501 Fujikura/DDK vendor ACER 0x0502 Acer vendor 3COM 0x0506 3Com vendor HOSIDEN 0x0507 Hosiden Corporation vendor AZTECH 0x0509 Aztech Systems vendor BELKIN 0x050d Belkin Components vendor KAWATSU 0x050f Kawatsu Semiconductor vendor FCI 0x0514 FCI vendor LONGWELL 0x0516 Longwell vendor COMPOSITE 0x0518 Composite vendor STAR 0x0519 Star Micronics vendor APC 0x051d American Power Conversion vendor SCIATLANTA 0x051e Scientific Atlanta vendor TSM 0x0520 TSM vendor CONNECTEK 0x0522 Advanced Connectek USA vendor NETCHIP 0x0525 NetChip Technology vendor ALTRA 0x0527 ALTRA vendor ATI 0x0528 ATI Technologies vendor AKS 0x0529 Aladdin Knowledge Systems vendor TEKOM 0x052b Tekom vendor CANONDEV 0x052c Canon vendor WACOMTECH 0x0531 Wacom vendor INVENTEC 0x0537 Inventec vendor SHYHSHIUN 0x0539 Shyh Shiun Terminals vendor PREHWERKE 0x053a Preh Werke Gmbh & Co. 
KG vendor SYNOPSYS 0x053f Synopsys vendor UNIACCESS 0x0540 Universal Access vendor VIEWSONIC 0x0543 ViewSonic vendor XIRLINK 0x0545 Xirlink vendor ANCHOR 0x0547 Anchor Chips vendor SONY 0x054c Sony vendor FUJIXEROX 0x0550 Fuji Xerox vendor VISION 0x0553 VLSI Vision vendor ASAHIKASEI 0x0556 Asahi Kasei Microsystems vendor ATEN 0x0557 ATEN International vendor SAMSUNG2 0x055d Samsung Electronics vendor MUSTEK 0x055f Mustek Systems vendor TELEX 0x0562 Telex Communications vendor CHINON 0x0564 Chinon vendor PERACOM 0x0565 Peracom Networks vendor ALCOR2 0x0566 Alcor Micro vendor XYRATEX 0x0567 Xyratex vendor WACOM 0x056a WACOM vendor ETEK 0x056c e-TEK Labs vendor EIZO 0x056d EIZO vendor ELECOM 0x056e Elecom vendor CONEXANT 0x0572 Conexant vendor HAUPPAUGE 0x0573 Hauppauge Computer Works vendor BAFO 0x0576 BAFO/Quality Computer Accessories vendor YEDATA 0x057b Y-E Data vendor AVM 0x057c AVM vendor QUICKSHOT 0x057f Quickshot vendor ROLAND 0x0582 Roland vendor ROCKFIRE 0x0583 Rockfire vendor RATOC 0x0584 RATOC Systems vendor ZYXEL 0x0586 ZyXEL Communication vendor INFINEON 0x058b Infineon vendor MICREL 0x058d Micrel vendor ALCOR 0x058f Alcor Micro vendor OMRON 0x0590 OMRON vendor ZORAN 0x0595 Zoran Microelectronics vendor NIIGATA 0x0598 Niigata vendor IOMEGA 0x059b Iomega vendor ATREND 0x059c A-Trend Technology vendor AID 0x059d Advanced Input Devices vendor LACIE 0x059f LaCie vendor FUJIFILM 0x05a2 Fuji Film vendor ARC 0x05a3 ARC vendor ORTEK 0x05a4 Ortek vendor CISCOLINKSYS3 0x05a6 Cisco-Linksys vendor BOSE 0x05a7 Bose vendor OMNIVISION 0x05a9 OmniVision vendor INSYSTEM 0x05ab In-System Design vendor APPLE 0x05ac Apple Computer vendor YCCABLE 0x05ad Y.C. Cable vendor DIGITALPERSONA 0x05ba DigitalPersona vendor 3G 0x05bc 3G Green Green Globe vendor RAFI 0x05bd RAFI vendor TYCO 0x05be Tyco vendor KAWASAKI 0x05c1 Kawasaki vendor DIGI 0x05c5 Digi International vendor QUALCOMM2 0x05c6 Qualcomm vendor QTRONIX 0x05c7 Qtronix vendor FOXLINK 0x05c8 Foxlink vendor RICOH 0x05ca Ricoh vendor ELSA 0x05cc ELSA vendor SCIWORX 0x05ce sci-worx vendor BRAINBOXES 0x05d1 Brainboxes Limited vendor ULTIMA 0x05d8 Ultima vendor AXIOHM 0x05d9 Axiohm Transaction Solutions vendor MICROTEK 0x05da Microtek vendor SUNTAC 0x05db SUN Corporation vendor LEXAR 0x05dc Lexar Media vendor ADDTRON 0x05dd Addtron vendor SYMBOL 0x05e0 Symbol Technologies vendor SYNTEK 0x05e1 Syntek vendor GENESYS 0x05e3 Genesys Logic vendor FUJI 0x05e5 Fuji Electric vendor KEITHLEY 0x05e6 Keithley Instruments vendor EIZONANAO 0x05e7 EIZO Nanao vendor KLSI 0x05e9 Kawasaki LSI vendor FFC 0x05eb FFC vendor ANKO 0x05ef Anko Electronic vendor PIENGINEERING 0x05f3 P.I. Engineering vendor AOC 0x05f6 AOC International vendor CHIC 0x05fe Chic Technology vendor BARCO 0x0600 Barco Display Systems vendor BRIDGE 0x0607 Bridge Information vendor SOLIDYEAR 0x060b Solid Year vendor BIORAD 0x0614 Bio-Rad Laboratories vendor MACALLY 0x0618 Macally vendor ACTLABS 0x061c Act Labs vendor ALARIS 0x0620 Alaris vendor APEX 0x0624 Apex vendor CREATIVE3 0x062a Creative Labs vendor MICRON 0x0634 Micron Technology vendor VIVITAR 0x0636 Vivitar vendor GUNZE 0x0637 Gunze Electronics USA vendor AVISION 0x0638 Avision vendor TEAC 0x0644 TEAC vendor ACTON 0x0647 Acton Research Corp. 
vendor OPTO 0x065a Optoelectronics Co., Ltd vendor SGI 0x065e Silicon Graphics vendor SANWASUPPLY 0x0663 Sanwa Supply vendor MEGATEC 0x0665 Megatec vendor LINKSYS 0x066b Linksys vendor ACERSA 0x066e Acer Semiconductor America vendor SIGMATEL 0x066f Sigmatel vendor DRAYTEK 0x0675 DrayTek vendor AIWA 0x0677 Aiwa vendor ACARD 0x0678 ACARD Technology vendor PROLIFIC 0x067b Prolific Technology vendor SIEMENS 0x067c Siemens vendor AVANCELOGIC 0x0680 Avance Logic vendor SIEMENS2 0x0681 Siemens vendor MINOLTA 0x0686 Minolta vendor CHPRODUCTS 0x068e CH Products vendor HAGIWARA 0x0693 Hagiwara Sys-Com vendor CTX 0x0698 Chuntex vendor ASKEY 0x069a Askey Computer vendor SAITEK 0x06a3 Saitek vendor ALCATELT 0x06b9 Alcatel Telecom vendor AGFA 0x06bd AGFA-Gevaert vendor ASIAMD 0x06be Asia Microelectronic Development vendor BIZLINK 0x06c4 Bizlink International vendor KEYSPAN 0x06cd Keyspan / InnoSys Inc. vendor CONTEC 0x06ce Contec products vendor AASHIMA 0x06d6 Aashima Technology vendor LIEBERT 0x06da Liebert vendor MULTITECH 0x06e0 MultiTech vendor ADS 0x06e1 ADS Technologies vendor ALCATELM 0x06e4 Alcatel Microelectronics vendor SIRIUS 0x06ea Sirius Technologies vendor GUILLEMOT 0x06f8 Guillemot vendor BOSTON 0x06fd Boston Acoustics vendor SMC 0x0707 Standard Microsystems vendor PUTERCOM 0x0708 Putercom vendor MCT 0x0711 MCT vendor IMATION 0x0718 Imation vendor TECLAST 0x071b Teclast vendor SONYERICSSON 0x0731 Sony Ericsson vendor EICON 0x0734 Eicon Networks vendor SYNTECH 0x0745 Syntech Information vendor DIGITALSTREAM 0x074e Digital Stream vendor AUREAL 0x0755 Aureal Semiconductor vendor MAUDIO 0x0763 M-Audio vendor CYBERPOWER 0x0764 Cyber Power Systems, Inc. vendor SURECOM 0x0769 Surecom Technology vendor HIDGLOBAL 0x076b HID Global vendor LINKSYS2 0x077b Linksys vendor GRIFFIN 0x077d Griffin Technology vendor SANDISK 0x0781 SanDisk vendor JENOPTIK 0x0784 Jenoptik vendor LOGITEC 0x0789 Logitec vendor NOKIA2 0x078b Nokia vendor BRIMAX 0x078e Brimax vendor AXIS 0x0792 Axis Communications vendor ABL 0x0794 ABL Electronics vendor SAGEM 0x079b Sagem vendor SUNCOMM 0x079c Sun Communications, Inc. vendor ALFADATA 0x079d Alfadata Computer vendor NATIONALTECH 0x07a2 National Technical Systems vendor ONNTO 0x07a3 Onnto vendor BE 0x07a4 Be vendor ADMTEK 0x07a6 ADMtek vendor COREGA 0x07aa Corega vendor FREECOM 0x07ab Freecom vendor MICROTECH 0x07af Microtech vendor GENERALINSTMNTS 0x07b2 General Instruments (Motorola) vendor OLYMPUS 0x07b4 Olympus vendor ABOCOM 0x07b8 AboCom Systems vendor KEISOKUGIKEN 0x07c1 Keisokugiken vendor ONSPEC 0x07c4 OnSpec vendor APG 0x07c5 APG Cash Drawer vendor BUG 0x07c8 B.U.G. vendor ALLIEDTELESYN 0x07c9 Allied Telesyn International vendor AVERMEDIA 0x07ca AVerMedia Technologies vendor SIIG 0x07cc SIIG vendor CASIO 0x07cf CASIO vendor DLINK2 0x07d1 D-Link vendor APTIO 0x07d2 Aptio Products vendor ARASAN 0x07da Arasan Chip Systems vendor ALLIEDCABLE 0x07e6 Allied Cable vendor STSN 0x07ef STSN vendor CENTURY 0x07f7 Century Corp vendor NEWLINK 0x07ff NEWlink vendor MAGTEK 0x0801 Mag-Tek vendor ZOOM 0x0803 Zoom Telephonics vendor PCS 0x0810 Personal Communication Systems vendor ALPHASMART 0x081e AlphaSmart, Inc. 
vendor BROADLOGIC 0x0827 BroadLogic vendor HANDSPRING 0x082d Handspring vendor PALM 0x0830 Palm Computing vendor SOURCENEXT 0x0833 SOURCENEXT vendor ACTIONSTAR 0x0835 Action Star Enterprise vendor SAMSUNG_TECHWIN 0x0839 Samsung Techwin vendor ACCTON 0x083a Accton Technology vendor DIAMOND 0x0841 Diamond vendor NETGEAR 0x0846 BayNETGEAR vendor TOPRE 0x0853 Topre Corporation vendor ACTIVEWIRE 0x0854 ActiveWire vendor BBELECTRONICS 0x0856 B&B Electronics vendor PORTGEAR 0x085a PortGear vendor NETGEAR2 0x0864 Netgear vendor SYSTEMTALKS 0x086e System Talks vendor METRICOM 0x0870 Metricom vendor ADESSOKBTEK 0x087c ADESSO/Kbtek America vendor JATON 0x087d Jaton vendor APT 0x0880 APT Technologies vendor BOCARESEARCH 0x0885 Boca Research vendor ANDREA 0x08a8 Andrea Electronics vendor BURRBROWN 0x08bb Burr-Brown Japan vendor 2WIRE 0x08c8 2Wire vendor AIPTEK 0x08ca AIPTEK International vendor SMARTBRIDGES 0x08d1 SmartBridges vendor FUJITSUSIEMENS 0x08d4 Fujitsu-Siemens vendor BILLIONTON 0x08dd Billionton Systems vendor GEMALTO 0x08e6 Gemalto SA vendor EXTENDED 0x08e9 Extended Systems vendor MSYSTEMS 0x08ec M-Systems vendor DIGIANSWER 0x08fd Digianswer vendor AUTHENTEC 0x08ff AuthenTec vendor AUDIOTECHNICA 0x0909 Audio-Technica vendor TRUMPION 0x090a Trumpion Microelectronics vendor FEIYA 0x090c Feiya vendor ALATION 0x0910 Alation Systems vendor GLOBESPAN 0x0915 Globespan vendor CONCORDCAMERA 0x0919 Concord Camera vendor GARMIN 0x091e Garmin International vendor GOHUBS 0x0921 GoHubs vendor DYMO 0x0922 DYMO vendor XEROX 0x0924 Xerox vendor BIOMETRIC 0x0929 American Biometric Company vendor TOSHIBA 0x0930 Toshiba vendor PLEXTOR 0x093b Plextor vendor INTREPIDCS 0x093c Intrepid vendor YANO 0x094f Yano vendor KINGSTON 0x0951 Kingston Technology vendor BLUEWATER 0x0956 BlueWater Systems vendor AGILENT 0x0957 Agilent Technologies vendor GUDE 0x0959 Gude ADS vendor PORTSMITH 0x095a Portsmith vendor ACERW 0x0967 Acer vendor ADIRONDACK 0x0976 Adirondack Wire & Cable vendor BECKHOFF 0x0978 Beckhoff vendor MINDSATWORK 0x097a Minds At Work vendor POINTCHIPS 0x09a6 PointChips vendor INTERSIL 0x09aa Intersil vendor ALTIUS 0x09b3 Altius Solutions vendor ARRIS 0x09c1 Arris Interactive vendor ACTIVCARD 0x09c3 ACTIVCARD vendor ACTISYS 0x09c4 ACTiSYS vendor NOVATEL2 0x09d7 Novatel Wireless vendor AFOURTECH 0x09da A-FOUR TECH vendor AIMEX 0x09dc AIMEX vendor ADDONICS 0x09df Addonics Technologies vendor AKAI 0x09e8 AKAI professional M.I. vendor ARESCOM 0x09f5 ARESCOM vendor BAY 0x09f9 Bay Associates vendor ALTERA 0x09fb Altera vendor CSR 0x0a12 Cambridge Silicon Radio vendor TREK 0x0a16 Trek Technology vendor ASAHIOPTICAL 0x0a17 Asahi Optical vendor BOCASYSTEMS 0x0a43 Boca Systems vendor SHANTOU 0x0a46 ShanTou vendor MEDIAGEAR 0x0a48 MediaGear vendor BROADCOM 0x0a5c Broadcom vendor GREENHOUSE 0x0a6b GREENHOUSE vendor MEDELI 0x0a67 Medeli vendor GEOCAST 0x0a79 Geocast Network Systems vendor EGO 0x0a92 EGO systems vendor IDQUANTIQUE 0x0aba ID Quantique vendor IDTECH 0x0acd ID TECH vendor ZYDAS 0x0ace Zydas Technology Corporation vendor NEODIO 0x0aec Neodio vendor OPTION 0x0af0 Option N.V. vendor ASUS 0x0b05 ASUSTeK Computer vendor TODOS 0x0b0c Todos Data System vendor SIIG2 0x0b39 SIIG vendor TEKRAM 0x0b3b Tekram Technology vendor HAL 0x0b41 HAL Corporation vendor EMS 0x0b43 EMS Production vendor NEC2 0x0b62 NEC vendor ADLINK 0x0b63 ADLINK Technoligy, Inc. vendor ATI2 0x0b6f ATI vendor ZEEVO 0x0b7a Zeevo, Inc. vendor KURUSUGAWA 0x0b7e Kurusugawa Electronics, Inc. 
vendor SMART 0x0b8c Smart Technologies vendor ASIX 0x0b95 ASIX Electronics vendor O2MICRO 0x0b97 O2 Micro, Inc. vendor USR 0x0baf U.S. Robotics vendor AMBIT 0x0bb2 Ambit Microsystems vendor HTC 0x0bb4 HTC vendor REALTEK 0x0bda Realtek vendor ERICSSON2 0x0bdb Ericsson vendor MEI 0x0bed MEI vendor ADDONICS2 0x0bf6 Addonics Technology vendor FSC 0x0bf8 Fujitsu Siemens Computers vendor AGATE 0x0c08 Agate Technologies vendor DMI 0x0c0b DMI vendor CANYON 0x0c10 Canyon vendor ICOM 0x0c26 Icom Inc. vendor GNOTOMETRICS 0x0c33 GN Otometrics vendor CHICONY2 0x0c45 Chicony / Microdia / Sonix Technology Co., Ltd. vendor REINERSCT 0x0c4b Reiner-SCT vendor SEALEVEL 0x0c52 Sealevel System vendor JETI 0x0c6c Jeti vendor LUWEN 0x0c76 Luwen vendor ELEKTOR 0x0c7d ELEKTOR Electronics vendor KYOCERA2 0x0c88 Kyocera Wireless Corp. vendor ZCOM 0x0cde Z-Com vendor ATHEROS2 0x0cf3 Atheros Communications vendor POSIFLEX 0x0d3a POSIFLEX vendor TANGTOP 0x0d3d Tangtop vendor KOBIL 0x0d46 KOBIL vendor SMC3 0x0d5c Standard Microsystems vendor ADDON 0x0d7d Add-on Technology vendor ACDC 0x0d7e American Computer & Digital Components vendor CMEDIA 0x0d8c CMEDIA vendor CONCEPTRONIC 0x0d8e Conceptronic vendor SKANHEX 0x0d96 Skanhex Technology, Inc. vendor MSI 0x0db0 Micro Star International vendor ELCON 0x0db7 ELCON Systemtechnik vendor UNKNOWN4 0x0dcd Unknown vendor vendor NETAC 0x0dd8 Netac vendor SITECOMEU 0x0df6 Sitecom Europe vendor MOBILEACTION 0x0df7 Mobile Action vendor AMIGO 0x0e0b Amigo Technology vendor SPEEDDRAGON 0x0e55 Speed Dragon Multimedia vendor HAWKING 0x0e66 Hawking vendor FOSSIL 0x0e67 Fossil, Inc vendor GMATE 0x0e7e G.Mate, Inc vendor MEDIATEK 0x0e8d MediaTek, Inc. vendor OTI 0x0ea0 Ours Technology vendor YISO 0x0eab Yiso Wireless Co. vendor PILOTECH 0x0eaf Pilotech vendor NOVATECH 0x0eb0 NovaTech vendor ITEGNO 0x0eba iTegno vendor WINMAXGROUP 0x0ed1 WinMaxGroup vendor TOD 0x0ede TOD vendor EGALAX 0x0eef eGalax, Inc. vendor AIRPRIME 0x0f3d AirPrime, Inc. vendor MICROTUNE 0x0f4d Microtune vendor VTECH 0x0f88 VTech vendor FALCOM 0x0f94 Falcom Wireless Communications GmbH vendor RIM 0x0fca Research In Motion vendor DYNASTREAM 0x0fcf Dynastream Innovations vendor LARSENBRUSGAARD 0x0fd8 Larsen and Brusgaard vendor OWL 0x0fde OWL vendor KONTRON 0x0fe6 Kontron AG vendor QUALCOMM 0x1004 Qualcomm vendor APACER 0x1005 Apacer vendor MOTOROLA4 0x100d Motorola vendor HP3 0x103c Hewlett Packard vendor AIRPLUS 0x1011 Airplus vendor DESKNOTE 0x1019 Desknote vendor NEC3 0x1033 NEC vendor TTI 0x103e Thurlby Thandar Instruments vendor GIGABYTE 0x1044 GIGABYTE vendor WESTERN 0x1058 Western Digital vendor MOTOROLA 0x1063 Motorola vendor CCYU 0x1065 CCYU Technology vendor CURITEL 0x106c Curitel Communications Inc vendor SILABS2 0x10a6 SILABS2 vendor USI 0x10ab USI vendor LIEBERT2 0x10af Liebert vendor PLX 0x10b5 PLX vendor ASANTE 0x10bd Asante vendor SILABS 0x10c4 Silicon Labs vendor SILABS3 0x10c5 Silicon Labs vendor SILABS4 0x10ce Silicon Labs vendor ACTIONS 0x10d6 Actions vendor ANALOG 0x1110 Analog Devices vendor TENX 0x1130 Ten X Technology, Inc. vendor ISSC 0x1131 Integrated System Solution Corp. vendor JRC 0x1145 Japan Radio Company vendor SPHAIRON 0x114b Sphairon Access Systems GmbH vendor DELORME 0x1163 DeLorme vendor SERVERWORKS 0x1166 ServerWorks vendor DLINK3 0x1186 Dlink vendor ACERCM 0x1189 Acer Communications & Multimedia vendor SIERRA 0x1199 Sierra Wireless vendor SANWA 0x11ad Sanwa Electric Instrument Co., Ltd. 
vendor TOPFIELD 0x11db Topfield Co., Ltd vendor SIEMENS3 0x11f5 Siemens vendor NETINDEX 0x11f6 NetIndex vendor ALCATEL 0x11f7 Alcatel vendor INTERBIOMETRICS 0x1209 Interbiometrics vendor UNKNOWN3 0x1233 Unknown vendor vendor TSUNAMI 0x1241 Tsunami vendor PHEENET 0x124a Pheenet vendor TARGUS 0x1267 Targus vendor TWINMOS 0x126f TwinMOS vendor TENDA 0x1286 Tenda vendor TESTO 0x128d Testo products vendor CREATIVE2 0x1292 Creative Labs vendor BELKIN2 0x1293 Belkin Components vendor CYBERTAN 0x129b CyberTAN Technology vendor HUAWEI 0x12d1 Huawei Technologies vendor ARANEUS 0x12d8 Araneus Information Systems vendor TAPWAVE 0x12ef Tapwave vendor AINCOMM 0x12fd Aincomm vendor MOBILITY 0x1342 Mobility vendor DICKSMITH 0x1371 Dick Smith Electronics vendor NETGEAR3 0x1385 Netgear vendor BALTECH 0x13ad Baltech vendor CISCOLINKSYS 0x13b1 Cisco-Linksys vendor SHARK 0x13d2 Shark vendor AZUREWAVE 0x13d3 AsureWave vendor INITIO 0x13fd Initio Corporation vendor EMTEC 0x13fe Emtec vendor NOVATEL 0x1410 Novatel Wireless vendor MERLIN 0x1416 Merlin vendor REDOCTANE 0x1430 RedOctane vendor WISTRONNEWEB 0x1435 Wistron NeWeb vendor RADIOSHACK 0x1453 Radio Shack vendor FIC 0x1457 FIC / OpenMoko vendor HUAWEI3COM 0x1472 Huawei-3Com vendor ABOCOM2 0x1482 AboCom Systems vendor SILICOM 0x1485 Silicom vendor RALINK 0x148f Ralink Technology vendor IMAGINATION 0x149a Imagination Technologies vendor ATP 0x14af ATP Electronics vendor CONCEPTRONIC2 0x14b2 Conceptronic vendor SUPERTOP 0x14cd Super Top vendor PLANEX3 0x14ea Planex Communications vendor SILICONPORTALS 0x1527 Silicon Portals vendor UBIQUAM 0x1529 UBIQUAM Co., Ltd. vendor JMICRON 0x152d JMicron vendor UBLOX 0x1546 U-blox vendor PNY 0x154b PNY vendor OWEN 0x1555 Owen vendor OQO 0x1557 OQO vendor UMEDIA 0x157e U-MEDIA Communications vendor FIBERLINE 0x1582 Fiberline vendor FREESCALE 0x15a2 Freescale Semiconductor, Inc. vendor AFATECH 0x15a4 Afatech Technologies, Inc. vendor SPARKLAN 0x15a9 SparkLAN vendor OLIMEX 0x15ba Olimex vendor SOUNDGRAPH 0x15c2 Soundgraph, Inc. vendor AMIT2 0x15c5 AMIT vendor TEXTECH 0x15ca Textech International Ltd. vendor SOHOWARE 0x15e8 SOHOware vendor UMAX 0x1606 UMAX Data Systems vendor INSIDEOUT 0x1608 Inside Out Networks vendor AMOI 0x1614 Amoi Electronics vendor GOODWAY 0x1631 Good Way Technology vendor ENTREGA 0x1645 Entrega vendor ACTIONTEC 0x1668 Actiontec Electronics vendor CLIPSAL 0x166a Clipsal vendor CISCOLINKSYS2 0x167b Cisco-Linksys vendor ATHEROS 0x168c Atheros Communications vendor GIGASET 0x1690 Gigaset vendor GLOBALSUN 0x16ab Global Sun Technology vendor ANYDATA 0x16d5 AnyDATA Corporation vendor JABLOTRON 0x16d6 Jablotron vendor CMOTECH 0x16d8 C-motech vendor WIENERPLEINBAUS 0x16dc WIENER Plein & Baus GmbH. vendor AXESSTEL 0x1726 Axesstel Co., Ltd. vendor LINKSYS4 0x1737 Linksys vendor SENAO 0x1740 Senao vendor ASUS2 0x1761 ASUS vendor SWEEX2 0x177f Sweex vendor METAGEEK 0x1781 MetaGeek vendor KAMSTRUP 0x17a8 Kamstrup A/S vendor DISPLAYLINK 0x17e9 DisplayLink vendor LENOVO 0x17ef Lenovo vendor WAVESENSE 0x17f4 WaveSense vendor VAISALA 0x1843 Vaisala vendor AMIT 0x18c5 AMIT vendor GOOGLE 0x18d1 Google vendor QCOM 0x18e8 Qcom vendor ELV 0x18ef ELV vendor LINKSYS3 0x1915 Linksys vendor QUALCOMMINC 0x19d2 Qualcomm, Incorporated vendor QUALCOMM3 0x19f5 Qualcomm, Inc. 
vendor BAYER 0x1a79 Bayer vendor WCH2 0x1a86 QinHeng Electronics vendor STELERA 0x1a8d Stelera Wireless vendor SEL 0x1adb Schweitzer Engineering Laboratories vendor CORSAIR 0x1b1c Corsair vendor MATRIXORBITAL 0x1b3d Matrix Orbital vendor OVISLINK 0x1b75 OvisLink vendor TML 0x1b91 The Mobility Lab vendor TCTMOBILE 0x1bbb TCT Mobile vendor ALTI2 0x1bc9 Alti-2 products vendor SUNPLUS 0x1bcf Sunplus Innovation Technology Inc. vendor WAGO 0x1be3 WAGO Kontakttechnik GmbH. vendor TELIT 0x1bc7 Telit vendor IONICS 0x1c0c Ionics PlugComputer vendor LONGCHEER 0x1c9e Longcheer Holdings, Ltd. vendor MPMAN 0x1cae MpMan vendor DRESDENELEKTRONIK 0x1cf1 dresden elektronik vendor NEOTEL 0x1d09 Neotel vendor DREAMLINK 0x1d34 Dream Link vendor PEGATRON 0x1d4d Pegatron vendor QISDA 0x1da5 Qisda vendor METAGEEK2 0x1dd5 MetaGeek vendor ALINK 0x1e0e Alink vendor AIRTIES 0x1eda AirTies vendor FESTO 0x1e29 Festo vendor LAKESHORE 0x1fb9 Lake Shore Cryotronics, Inc. vendor VERTEX 0x1fe7 Vertex Wireless Co., Ltd. vendor DLINK 0x2001 D-Link vendor PLANEX2 0x2019 Planex Communications vendor HAUPPAUGE2 0x2040 Hauppauge Computer Works vendor TLAYTECH 0x20b9 Tlay Tech vendor ENCORE 0x203d Encore vendor QIHARDWARE 0x20b7 QI-hardware vendor PARA 0x20b8 PARA Industrial vendor SIMTEC 0x20df Simtec Electronics vendor TRENDNET 0x20f4 TRENDnet vendor RTSYSTEMS 0x2100 RTSYSTEMS vendor VIALABS 0x2109 VIA Labs vendor ERICSSON 0x2282 Ericsson vendor MOTOROLA2 0x22b8 Motorola vendor WETELECOM 0x22de WeTelecom vendor WESTMOUNTAIN 0x2405 West Mountain Radio vendor TRIPPLITE 0x2478 Tripp-Lite vendor HIROSE 0x2631 Hirose Electric vendor NHJ 0x2770 NHJ vendor PLANEX 0x2c02 Planex Communications vendor VIDZMEDIA 0x3275 VidzMedia Pte Ltd vendor LINKINSTRUMENTS 0x3195 Link Instruments Inc. vendor AEI 0x3334 AEI vendor HANK 0x3353 Hank Connection vendor PQI 0x3538 PQI vendor DAISY 0x3579 Daisy Technology vendor NI 0x3923 National Instruments vendor MICRONET 0x3980 Micronet Communications vendor IODATA2 0x40bb I-O Data vendor IRIVER 0x4102 iRiver vendor DELL 0x413c Dell vendor WCH 0x4348 QinHeng Electronics vendor ACEECA 0x4766 Aceeca vendor FEIXUN 0x4855 FeiXun Communication vendor PAPOUCH 0x5050 Papouch products vendor AVERATEC 0x50c2 Averatec vendor SWEEX 0x5173 Sweex vendor PROLIFIC2 0x5372 Prolific Technologies vendor ONSPEC2 0x55aa OnSpec Electronic Inc. vendor ZINWELL 0x5a57 Zinwell vendor SITECOM 0x6189 Sitecom vendor ARKMICRO 0x6547 Arkmicro Technologies Inc. vendor 3COM2 0x6891 3Com vendor EDIMAX 0x7392 Edimax vendor INTEL 0x8086 Intel vendor INTEL2 0x8087 Intel vendor ALLWIN 0x8516 ALLWIN Tech vendor SITECOM2 0x9016 Sitecom vendor MOSCHIP 0x9710 MosChip Semiconductor vendor NETGEAR4 0x9846 Netgear vendor MARVELL 0x9e88 Marvell Technology Group Ltd. vendor 3COM3 0xa727 3Com vendor CACE 0xcace CACE Technologies vendor EVOLUTION 0xdeee Evolution Robotics products vendor DATAAPEX 0xdaae DataApex vendor HP2 0xf003 Hewlett Packard vendor LOGILINK 0xfc08 LogiLink vendor USRP 0xfffe GNU Radio USRP /* * List of known products. Grouped by vendor. 
*/ /* 3Com products */ product 3COM HOMECONN 0x009d HomeConnect Camera product 3COM 3CREB96 0x00a0 Bluetooth USB Adapter product 3COM 3C19250 0x03e8 3C19250 Ethernet Adapter product 3COM 3CRSHEW696 0x0a01 3CRSHEW696 Wireless Adapter product 3COM 3C460 0x11f8 HomeConnect 3C460 product 3COM USR56K 0x3021 U.S.Robotics 56000 Voice FaxModem Pro product 3COM 3C460B 0x4601 HomeConnect 3C460B product 3COM2 3CRUSB10075 0xa727 3CRUSB10075 product 3COM3 AR5523_1 0x6893 AR5523 product 3COM3 AR5523_2 0x6895 AR5523 product 3COM3 AR5523_3 0x6897 AR5523 product 3COMUSR OFFICECONN 0x0082 3Com OfficeConnect Analog Modem product 3COMUSR USRISDN 0x008f 3Com U.S. Robotics Pro ISDN TA product 3COMUSR HOMECONN 0x009d 3Com HomeConnect Camera product 3COMUSR USR56K 0x3021 U.S. Robotics 56000 Voice FaxModem Pro /* AboCom products */ product ABOCOM XX1 0x110c XX1 product ABOCOM XX2 0x200c XX2 product ABOCOM RT2770 0x2770 RT2770 product ABOCOM RT2870 0x2870 RT2870 product ABOCOM RT3070 0x3070 RT3070 product ABOCOM RT3071 0x3071 RT3071 product ABOCOM RT3072 0x3072 RT3072 product ABOCOM2 RT2870_1 0x3c09 RT2870 product ABOCOM URE450 0x4000 URE450 Ethernet Adapter product ABOCOM UFE1000 0x4002 UFE1000 Fast Ethernet Adapter product ABOCOM DSB650TX_PNA 0x4003 1/10/100 Ethernet Adapter product ABOCOM XX4 0x4004 XX4 product ABOCOM XX5 0x4007 XX5 product ABOCOM XX6 0x400b XX6 product ABOCOM XX7 0x400c XX7 product ABOCOM RTL8151 0x401a RTL8151 product ABOCOM XX8 0x4102 XX8 product ABOCOM XX9 0x4104 XX9 product ABOCOM UF200 0x420a UF200 Ethernet product ABOCOM WL54 0x6001 WL54 product ABOCOM XX10 0xabc1 XX10 product ABOCOM BWU613 0xb000 BWU613 product ABOCOM HWU54DM 0xb21b HWU54DM product ABOCOM RT2573_2 0xb21c RT2573 product ABOCOM RT2573_3 0xb21d RT2573 product ABOCOM RT2573_4 0xb21e RT2573 product ABOCOM RTL8188CU_1 0x8188 RTL8188CU product ABOCOM RTL8188CU_2 0x8189 RTL8188CU product ABOCOM RTL8192CU 0x8178 RTL8192CU +product ABOCOM RTL8188EU 0x8179 RTL8188EU product ABOCOM WUG2700 0xb21f WUG2700 /* Acton Research Corp. */ product ACTON SPECTRAPRO 0x0100 FTDI compatible adapter /* Accton products */ product ACCTON USB320_EC 0x1046 USB320-EC Ethernet Adapter product ACCTON 2664W 0x3501 2664W product ACCTON 111 0x3503 T-Sinus 111 Wireless Adapter product ACCTON SMCWUSBG_NF 0x4505 SMCWUSB-G (no firmware) product ACCTON SMCWUSBG 0x4506 SMCWUSB-G product ACCTON SMCWUSBTG2_NF 0x4507 SMCWUSBT-G2 (no firmware) product ACCTON SMCWUSBTG2 0x4508 SMCWUSBT-G2 product ACCTON PRISM_GT 0x4521 PrismGT USB 2.0 WLAN product ACCTON SS1001 0x5046 SpeedStream Ethernet Adapter product ACCTON RT2870_2 0x6618 RT2870 product ACCTON RT3070 0x7511 RT3070 product ACCTON RT2770 0x7512 RT2770 product ACCTON RT2870_3 0x7522 RT2870 product ACCTON RT2870_5 0x8522 RT2870 product ACCTON RT3070_4 0xa512 RT3070 product ACCTON RT2870_4 0xa618 RT2870 product ACCTON RT3070_1 0xa701 RT3070 product ACCTON RT3070_2 0xa702 RT3070 product ACCTON RT2870_1 0xb522 RT2870 product ACCTON RT3070_3 0xc522 RT3070 product ACCTON RT3070_5 0xd522 RT3070 product ACCTON RTL8192SU 0xc512 RTL8192SU product ACCTON ZD1211B 0xe501 ZD1211B product ACCTON WN7512 0xf522 WN7512 /* Aceeca products */ product ACEECA MEZ1000 0x0001 MEZ1000 RDA /* Acer Communications & Multimedia (oemd by Surecom) */ product ACERCM EP1427X2 0x0893 EP-1427X-2 Ethernet Adapter /* Acer Labs products */ product ACERLABS M5632 0x5632 USB 2.0 Data Link /* Acer Peripherals, Inc. 
products */ product ACERP ACERSCAN_C310U 0x12a6 Acerscan C310U product ACERP ACERSCAN_320U 0x2022 Acerscan 320U product ACERP ACERSCAN_640U 0x2040 Acerscan 640U product ACERP ACERSCAN_620U 0x2060 Acerscan 620U product ACERP ACERSCAN_4300U 0x20b0 Benq 3300U/4300U product ACERP ACERSCAN_640BT 0x20be Acerscan 640BT product ACERP ACERSCAN_1240U 0x20c0 Acerscan 1240U product ACERP S81 0x4027 BenQ S81 phone product ACERP H10 0x4068 AWL400 Wireless Adapter product ACERP ATAPI 0x6003 ATA/ATAPI Adapter product ACERP AWL300 0x9000 AWL300 Wireless Adapter product ACERP AWL400 0x9001 AWL400 Wireless Adapter /* Acer Warp products */ product ACERW WARPLINK 0x0204 Warplink /* Actions products */ product ACTIONS MP4 0x1101 Actions MP4 Player /* Actiontec, Inc. products */ product ACTIONTEC PRISM_25 0x0408 Prism2.5 Wireless Adapter product ACTIONTEC PRISM_25A 0x0421 Prism2.5 Wireless Adapter A product ACTIONTEC FREELAN 0x6106 ROPEX FreeLan 802.11b product ACTIONTEC UAT1 0x7605 UAT1 Wireless Ethernet Adapter /* ACTiSYS products */ product ACTISYS IR2000U 0x0011 ACT-IR2000U FIR /* ActiveWire, Inc. products */ product ACTIVEWIRE IOBOARD 0x0100 I/O Board product ACTIVEWIRE IOBOARD_FW1 0x0101 I/O Board, rev. 1 firmware /* Adaptec products */ product ADAPTEC AWN8020 0x0020 AWN-8020 WLAN /* Addtron products */ product ADDTRON AWU120 0xff31 AWU-120 /* ADLINK Texhnology products */ product ADLINK ND6530 0x6530 ND-6530 USB-Serial /* ADMtek products */ product ADMTEK PEGASUSII_4 0x07c2 AN986A Ethernet product ADMTEK PEGASUS 0x0986 AN986 Ethernet product ADMTEK PEGASUSII 0x8511 AN8511 Ethernet product ADMTEK PEGASUSII_2 0x8513 AN8513 Ethernet product ADMTEK PEGASUSII_3 0x8515 AN8515 Ethernet /* ADDON products */ /* PNY OEMs these */ product ADDON ATTACHE 0x1300 USB 2.0 Flash Drive product ADDON ATTACHE 0x1300 USB 2.0 Flash Drive product ADDON A256MB 0x1400 Attache 256MB USB 2.0 Flash Drive product ADDON DISKPRO512 0x1420 USB 2.0 Flash Drive (DANE-ELEC zMate 512MB USB flash drive) /* Addonics products */ product ADDONICS2 CABLE_205 0xa001 Cable 205 /* ADS products */ product ADS UBS10BT 0x0008 UBS-10BT Ethernet product ADS UBS10BTX 0x0009 UBS-10BT Ethernet /* AEI products */ product AEI FASTETHERNET 0x1701 Fast Ethernet /* Afatech Technologies, Inc. */ product AFATECH AFATECH1336 0x1336 Flash Card Reader /* Agate Technologies products */ product AGATE QDRIVE 0x0378 Q-Drive /* AGFA products */ product AGFA SNAPSCAN1212U 0x0001 SnapScan 1212U product AGFA SNAPSCAN1236U 0x0002 SnapScan 1236U product AGFA SNAPSCANTOUCH 0x0100 SnapScan Touch product AGFA SNAPSCAN1212U2 0x2061 SnapScan 1212U product AGFA SNAPSCANE40 0x208d SnapScan e40 product AGFA SNAPSCANE50 0x208f SnapScan e50 product AGFA SNAPSCANE20 0x2091 SnapScan e20 product AGFA SNAPSCANE25 0x2095 SnapScan e25 product AGFA SNAPSCANE26 0x2097 SnapScan e26 product AGFA SNAPSCANE52 0x20fd SnapScan e52 /* Ain Communication Technology products */ product AINCOMM AWU2000B 0x1001 AWU2000B Wireless Adapter /* AIPTEK products */ product AIPTEK POCKETCAM3M 0x2011 PocketCAM 3Mega product AIPTEK2 PENCAM_MEGA_1_3 0x504a PenCam Mega 1.3 product AIPTEK2 SUNPLUS_TECH 0x0c15 Sunplus Technology Inc. 
/* AirPlis products */ product AIRPLUS MCD650 0x3198 MCD650 modem /* AirPrime products */ product AIRPRIME PC5220 0x0112 CDMA Wireless PC Card product AIRPRIME USB308 0x68A3 USB308 HSPA+ USB Modem product AIRPRIME AC313U 0x68aa Sierra Wireless AirCard 313U /* AirTies products */ product AIRTIES RT3070 0x2310 RT3070 /* AKS products */ product AKS USBHASP 0x0001 USB-HASP 0.06 /* Alcatel products */ product ALCATEL OT535 0x02df One Touch 535/735 /* Alcor Micro, Inc. products */ product ALCOR2 KBD_HUB 0x2802 Kbd Hub product ALCOR DUMMY 0x0000 Dummy product product ALCOR SDCR_6335 0x6335 SD/MMC Card Reader product ALCOR SDCR_6362 0x6362 SD/MMC Card Reader product ALCOR SDCR_6366 0x6366 SD/MMC Card Reader product ALCOR TRANSCEND 0x6387 Transcend JetFlash Drive product ALCOR MA_KBD_HUB 0x9213 MacAlly Kbd Hub product ALCOR AU9814 0x9215 AU9814 Hub product ALCOR UMCR_9361 0x9361 USB Multimedia Card Reader product ALCOR SM_KBD 0x9410 MicroConnectors/StrongMan Keyboard product ALCOR NEC_KBD_HUB 0x9472 NEC Kbd Hub product ALCOR AU9720 0x9720 USB2 - RS-232 product ALCOR AU6390 0x6390 AU6390 USB-IDE converter /* Alink products */ product ALINK DWM652U5 0xce16 DWM-652 product ALINK 3G 0x9000 3G modem product ALINK 3GU 0x9200 3G modem /* Altec Lansing products */ product ALTEC ADA70 0x0070 ADA70 Speakers product ALTEC ASC495 0xff05 ASC495 Speakers /* Alti-2 products */ product ALTI2 N3 0x6001 FTDI compatible adapter /* Allied Telesyn International products */ product ALLIEDTELESYN ATUSB100 0xb100 AT-USB100 /* ALLWIN Tech products */ product ALLWIN RT2070 0x2070 RT2070 product ALLWIN RT2770 0x2770 RT2770 product ALLWIN RT2870 0x2870 RT2870 product ALLWIN RT3070 0x3070 RT3070 product ALLWIN RT3071 0x3071 RT3071 product ALLWIN RT3072 0x3072 RT3072 product ALLWIN RT3572 0x3572 RT3572 /* AlphaSmart, Inc. products */ product ALPHASMART DANA_KB 0xdbac AlphaSmart Dana Keyboard product ALPHASMART DANA_SYNC 0xdf00 AlphaSmart Dana HotSync /* Amoi products */ product AMOI H01 0x0800 H01 3G modem product AMOI H01A 0x7002 H01A 3G modem product AMOI H02 0x0802 H02 3G modem /* American Power Conversion products */ product APC UPS 0x0002 Uninterruptible Power Supply /* Ambit Microsystems products */ product AMBIT WLAN 0x0302 WLAN product AMBIT NTL_250 0x6098 NTL 250 cable modem /* Apacer products */ product APACER HT202 0xb113 USB 2.0 Flash Drive /* American Power Conversion products */ product APC UPS 0x0002 Uninterruptible Power Supply /* Amigo Technology products */ product AMIGO RT2870_1 0x9031 RT2870 product AMIGO RT2870_2 0x9041 RT2870 /* AMIT products */ product AMIT CGWLUSB2GO 0x0002 CG-WLUSB2GO product AMIT CGWLUSB2GNR 0x0008 CG-WLUSB2GNR product AMIT RT2870_1 0x0012 RT2870 /* AMIT(2) products */ product AMIT2 RT2870 0x0008 RT2870 /* Analog Devices products */ product ANALOGDEVICES GNICE 0xf000 FTDI compatible adapter product ANALOGDEVICES GNICEPLUS 0xf001 FTDI compatible adapter /* Anchor products */ product ANCHOR SERIAL 0x2008 Serial product ANCHOR EZUSB 0x2131 EZUSB product ANCHOR EZLINK 0x2720 EZLINK /* AnyData products */ product ANYDATA ADU_620UW 0x6202 CDMA 2000 EV-DO USB Modem product ANYDATA ADU_E100X 0x6501 CDMA 2000 1xRTT/EV-DO USB Modem product ANYDATA ADU_500A 0x6502 CDMA 2000 EV-DO USB Modem /* AOX, Inc. 
products */ product AOX USB101 0x0008 Ethernet /* American Power Conversion products */ product APC UPS 0x0002 Uninterruptible Power Supply /* Apple Computer products */ product APPLE DUMMY 0x0000 Dummy product product APPLE IMAC_KBD 0x0201 USB iMac Keyboard product APPLE KBD 0x0202 USB Keyboard M2452 product APPLE EXT_KBD 0x020c Apple Extended USB Keyboard /* MacbookAir, aka wellspring */ product APPLE WELLSPRING_ANSI 0x0223 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING_ISO 0x0224 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING_JIS 0x0225 Apple Internal Keyboard/Trackpad /* MacbookProPenryn, aka wellspring2 */ product APPLE WELLSPRING2_ANSI 0x0230 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING2_ISO 0x0231 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING2_JIS 0x0232 Apple Internal Keyboard/Trackpad /* Macbook5,1 (unibody), aka wellspring3 */ product APPLE WELLSPRING3_ANSI 0x0236 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING3_ISO 0x0237 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING3_JIS 0x0238 Apple Internal Keyboard/Trackpad /* MacbookAir3,2 (unibody), aka wellspring4 */ product APPLE WELLSPRING4_ANSI 0x023f Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4_ISO 0x0240 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4_JIS 0x0241 Apple Internal Keyboard/Trackpad /* MacbookAir3,1 (unibody), aka wellspring4 */ product APPLE WELLSPRING4A_ANSI 0x0242 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4A_ISO 0x0243 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4A_JIS 0x0244 Apple Internal Keyboard/Trackpad /* Macbook8 (unibody, March 2011) */ product APPLE WELLSPRING5_ANSI 0x0245 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5_ISO 0x0246 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5_JIS 0x0247 Apple Internal Keyboard/Trackpad /* MacbookAir4,1 (unibody, July 2011) */ product APPLE WELLSPRING6A_ANSI 0x0249 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6A_ISO 0x024a Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6A_JIS 0x024b Apple Internal Keyboard/Trackpad /* MacbookAir4,2 (unibody, July 2011) */ product APPLE WELLSPRING6_ANSI 0x024c Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6_ISO 0x024d Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6_JIS 0x024e Apple Internal Keyboard/Trackpad /* Macbook8,2 (unibody) */ product APPLE WELLSPRING5A_ANSI 0x0252 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5A_ISO 0x0253 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5A_JIS 0x0254 Apple Internal Keyboard/Trackpad /* MacbookPro10,1 (unibody, June 2012) */ product APPLE WELLSPRING7_ANSI 0x0262 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7_ISO 0x0263 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7_JIS 0x0264 Apple Internal Keyboard/Trackpad /* MacbookPro10,2 (unibody, October 2012) */ product APPLE WELLSPRING7A_ANSI 0x0259 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7A_ISO 0x025a Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7A_JIS 0x025b Apple Internal Keyboard/Trackpad /* MacbookAir6,2 (unibody, June 2013) */ product APPLE WELLSPRING8_ANSI 0x0290 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING8_ISO 0x0291 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING8_JIS 0x0292 Apple Internal Keyboard/Trackpad /* MacbookPro12,1 */ product APPLE WELLSPRING9_ANSI 0x0272 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING9_ISO 0x0273 Apple Internal Keyboard/Trackpad 
product APPLE WELLSPRING9_JIS 0x0274 Apple Internal Keyboard/Trackpad product APPLE MOUSE 0x0301 Mouse M4848 product APPLE OPTMOUSE 0x0302 Optical mouse product APPLE MIGHTYMOUSE 0x0304 Mighty Mouse product APPLE KBD_HUB 0x1001 Hub in Apple USB Keyboard product APPLE EXT_KBD_HUB 0x1003 Hub in Apple Extended USB Keyboard product APPLE SPEAKERS 0x1101 Speakers product APPLE IPOD 0x1201 iPod product APPLE IPOD2G 0x1202 iPod 2G product APPLE IPOD3G 0x1203 iPod 3G product APPLE IPOD_04 0x1204 iPod '04' product APPLE IPODMINI 0x1205 iPod Mini product APPLE IPOD_06 0x1206 iPod '06' product APPLE IPOD_07 0x1207 iPod '07' product APPLE IPOD_08 0x1208 iPod '08' product APPLE IPODVIDEO 0x1209 iPod Video product APPLE IPODNANO 0x120a iPod Nano product APPLE IPHONE 0x1290 iPhone product APPLE IPOD_TOUCH 0x1291 iPod Touch product APPLE IPHONE_3G 0x1292 iPhone 3G product APPLE IPHONE_3GS 0x1294 iPhone 3GS product APPLE IPHONE_4 0x1297 iPhone 4 product APPLE IPHONE_4S 0x12a0 iPhone 4S product APPLE IPHONE_5 0x12a8 iPhone 5 product APPLE IPAD 0x129a iPad product APPLE ETHERNET 0x1402 Ethernet A1277 /* Arkmicro Technologies */ product ARKMICRO ARK3116 0x0232 ARK3116 Serial /* Asahi Optical products */ product ASAHIOPTICAL OPTIO230 0x0004 Digital camera product ASAHIOPTICAL OPTIO330 0x0006 Digital camera /* Asante products */ product ASANTE EA 0x1427 Ethernet /* ASIX Electronics products */ product ASIX AX88172 0x1720 10/100 Ethernet product ASIX AX88178 0x1780 AX88178 product ASIX AX88178A 0x178a AX88178A USB 2.0 10/100/1000 Ethernet product ASIX AX88179 0x1790 AX88179 USB 3.0 10/100/1000 Ethernet product ASIX AX88772 0x7720 AX88772 product ASIX AX88772A 0x772a AX88772A USB 2.0 10/100 Ethernet product ASIX AX88772B 0x772b AX88772B USB 2.0 10/100 Ethernet product ASIX AX88772B_1 0x7e2b AX88772B USB 2.0 10/100 Ethernet /* ASUS products */ product ASUS2 USBN11 0x0b05 USB-N11 product ASUS RT2570 0x1706 RT2500USB Wireless Adapter product ASUS WL167G 0x1707 WL-167g Wireless Adapter product ASUS WL159G 0x170c WL-159g product ASUS A9T_WIFI 0x171b A9T wireless product ASUS P5B_WIFI 0x171d P5B wireless product ASUS RT2573_1 0x1723 RT2573 product ASUS RT2573_2 0x1724 RT2573 product ASUS LCM 0x1726 LCM display product ASUS RT2870_1 0x1731 RT2870 product ASUS RT2870_2 0x1732 RT2870 product ASUS RT2870_3 0x1742 RT2870 product ASUS RT2870_4 0x1760 RT2870 product ASUS RT2870_5 0x1761 RT2870 product ASUS USBN13 0x1784 USB-N13 product ASUS USBN10 0x1786 USB-N10 product ASUS RT3070_1 0x1790 RT3070 product ASUS RTL8192SU 0x1791 RTL8192SU product ASUS USB_N53 0x179d ASUS Black Diamond Dual Band USB-N53 product ASUS RTL8192CU 0x17ab RTL8192CU product ASUS USBN66 0x17ad USB-N66 product ASUS USBN10NANO 0x17ba USB-N10 Nano product ASUS USBAC51 0x17d1 USB-AC51 product ASUS A730W 0x4202 ASUS MyPal A730W product ASUS P535 0x420f ASUS P535 PDA product ASUS GMSC 0x422f ASUS Generic Mass Storage /* ATen products */ product ATEN UC1284 0x2001 Parallel printer product ATEN UC10T 0x2002 10Mbps Ethernet product ATEN UC110T 0x2007 UC-110T Ethernet product ATEN UC232A 0x2008 Serial product ATEN UC210T 0x2009 UC-210T Ethernet product ATEN DSB650C 0x4000 DSB-650C /* ATP Electronics products */ product ATP EUSB 0xaf01 ATP IG eUSB SSD /* Atheros Communications products */ product ATHEROS AR5523 0x0001 AR5523 product ATHEROS AR5523_NF 0x0002 AR5523 (no firmware) product ATHEROS2 AR5523_1 0x0001 AR5523 product ATHEROS2 AR5523_1_NF 0x0002 AR5523 (no firmware) product ATHEROS2 AR5523_2 0x0003 AR5523 product ATHEROS2 AR5523_2_NF 0x0004 AR5523 (no 
firmware) product ATHEROS2 AR5523_3 0x0005 AR5523 product ATHEROS2 AR5523_3_NF 0x0006 AR5523 (no firmware) product ATHEROS2 TG121N 0x1001 TG121N product ATHEROS2 WN821NV2 0x1002 WN821NV2 product ATHEROS2 3CRUSBN275 0x1010 3CRUSBN275 product ATHEROS2 WN612 0x1011 WN612 product ATHEROS2 AR9170 0x9170 AR9170 /* Atmel Comp. products */ product ATMEL STK541 0x2109 Zigbee Controller product ATMEL UHB124 0x3301 UHB124 hub product ATMEL DWL120 0x7603 DWL-120 Wireless Adapter product ATMEL BW002 0x7605 BW002 Wireless Adapter product ATMEL WL1130USB 0x7613 WL-1130 USB product ATMEL AT76C505A 0x7614 AT76c505a Wireless Adapter /* AuthenTec products */ product AUTHENTEC AES1610 0x1600 AES1610 Fingerprint Sensor /* Avision products */ product AVISION 1200U 0x0268 1200U scanner /* AVM products */ product AVM FRITZWLAN 0x8401 FRITZ!WLAN N /* Axesstel products */ product AXESSTEL DATAMODEM 0x1000 Data Modem /* AsureWave products */ product AZUREWAVE RT2870_1 0x3247 RT2870 product AZUREWAVE RT2870_2 0x3262 RT2870 product AZUREWAVE RT3070_1 0x3273 RT3070 product AZUREWAVE RT3070_2 0x3284 RT3070 product AZUREWAVE RT3070_3 0x3305 RT3070 product AZUREWAVE RTL8188CU 0x3357 RTL8188CU product AZUREWAVE RTL8188CE_1 0x3358 RTL8188CE product AZUREWAVE RTL8188CE_2 0x3359 RTL8188CE product AZUREWAVE RTL8192SU_1 0x3306 RTL8192SU product AZUREWAVE RTL8192SU_2 0x3309 RTL8192SU product AZUREWAVE RTL8192SU_3 0x3310 RTL8192SU product AZUREWAVE RTL8192SU_4 0x3311 RTL8192SU product AZUREWAVE RTL8192SU_5 0x3325 RTL8192SU /* Baltech products */ product BALTECH CARDREADER 0x9999 Card reader /* Bayer products */ product BAYER CONTOUR_CABLE 0x6001 FTDI compatible adapter /* B&B Electronics products */ product BBELECTRONICS USOTL4 0xAC01 RS-422/485 product BBELECTRONICS 232USB9M 0xac27 FTDI compatible adapter product BBELECTRONICS 485USB9F_2W 0xac25 FTDI compatible adapter product BBELECTRONICS 485USB9F_4W 0xac26 FTDI compatible adapter product BBELECTRONICS 485USBTB_2W 0xac33 FTDI compatible adapter product BBELECTRONICS 485USBTB_4W 0xac34 FTDI compatible adapter product BBELECTRONICS TTL3USB9M 0xac50 FTDI compatible adapter product BBELECTRONICS TTL5USB9M 0xac49 FTDI compatible adapter product BBELECTRONICS USO9ML2 0xac03 FTDI compatible adapter product BBELECTRONICS USO9ML2DR 0xac17 FTDI compatible adapter product BBELECTRONICS USO9ML2DR_2 0xac16 FTDI compatible adapter product BBELECTRONICS USOPTL4 0xac11 FTDI compatible adapter product BBELECTRONICS USOPTL4DR 0xac19 FTDI compatible adapter product BBELECTRONICS USOPTL4DR2 0xac18 FTDI compatible adapter product BBELECTRONICS USPTL4 0xac12 FTDI compatible adapter product BBELECTRONICS USTL4 0xac02 FTDI compatible adapter product BBELECTRONICS ZZ_PROG1_USB 0xba02 FTDI compatible adapter /* Belkin products */ /*product BELKIN F5U111 0x???? 
F5U111 Ethernet*/ product BELKIN F5D6050 0x0050 F5D6050 802.11b Wireless Adapter product BELKIN FBT001V 0x0081 FBT001v2 Bluetooth product BELKIN FBT003V 0x0084 FBT003v2 Bluetooth product BELKIN F5U103 0x0103 F5U103 Serial product BELKIN F5U109 0x0109 F5U109 Serial product BELKIN USB2SCSI 0x0115 USB to SCSI product BELKIN F8T012 0x0121 F8T012xx1 Bluetooth USB Adapter product BELKIN USB2LAN 0x0121 USB to LAN product BELKIN F5U208 0x0208 F5U208 VideoBus II product BELKIN F5U237 0x0237 F5U237 USB 2.0 7-Port Hub product BELKIN F5U257 0x0257 F5U257 Serial product BELKIN F5U409 0x0409 F5U409 Serial product BELKIN F6C550AVR 0x0551 F6C550-AVR UPS product BELKIN F5U120 0x1203 F5U120-PC Hub product BELKIN RTL8188CU 0x1102 RTL8188CU Wireless Adapter product BELKIN F9L1103 0x1103 F9L1103 Wireless Adapter product BELKIN RTL8192CU 0x2102 RTL8192CU Wireless Adapter product BELKIN F7D2102 0x2103 F7D2102 Wireless Adapter product BELKIN ZD1211B 0x4050 ZD1211B product BELKIN F5D5055 0x5055 F5D5055 product BELKIN F5D7050 0x7050 F5D7050 Wireless Adapter product BELKIN F5D7051 0x7051 F5D7051 54g USB Network Adapter product BELKIN F5D7050A 0x705a F5D7050A Wireless Adapter /* Also sold as 'Ativa 802.11g wireless card' */ product BELKIN F5D7050_V4000 0x705c F5D7050 v4000 Wireless Adapter product BELKIN F5D7050E 0x705e F5D7050E Wireless Adapter product BELKIN RT2870_1 0x8053 RT2870 product BELKIN RT2870_2 0x805c RT2870 product BELKIN F5D8053V3 0x815c F5D8053 v3 product BELKIN RTL8192SU_1 0x815f RTL8192SU product BELKIN RTL8192SU_2 0x845a RTL8192SU product BELKIN RTL8192SU_3 0x945a RTL8192SU product BELKIN F5D8055 0x825a F5D8055 product BELKIN F5D8055V2 0x825b F5D8055 v2 product BELKIN F5D9050V3 0x905b F5D9050 ver 3 Wireless Adapter product BELKIN2 F5U002 0x0002 F5U002 Parallel printer product BELKIN F6D4050V1 0x935a F6D4050 v1 product BELKIN F6D4050V2 0x935b F6D4050 v2 /* Billionton products */ product BILLIONTON USB100 0x0986 USB100N 10/100 FastEthernet product BILLIONTON USBLP100 0x0987 USB100LP product BILLIONTON USBEL100 0x0988 USB100EL product BILLIONTON USBE100 0x8511 USBE100 product BILLIONTON USB2AR 0x90ff USB2AR Ethernet /* Broadcom products */ product BROADCOM BCM2033 0x2033 BCM2033 Bluetooth USB dongle /* Brother Industries products */ product BROTHER HL1050 0x0002 HL-1050 laser printer product BROTHER MFC8600_9650 0x0100 MFC8600/9650 multifunction device /* Behavior Technology Computer products */ product BTC BTC6100 0x5550 6100C Keyboard product BTC BTC7932 0x6782 Keyboard with mouse port /* CACE Technologies products */ product CACE AIRPCAPNX 0x0300 AirPcap NX /* Canon, Inc. 
products */ product CANON N656U 0x2206 CanoScan N656U product CANON N1220U 0x2207 CanoScan N1220U product CANON D660U 0x2208 CanoScan D660U product CANON N676U 0x220d CanoScan N676U product CANON N1240U 0x220e CanoScan N1240U product CANON LIDE25 0x2220 CanoScan LIDE 25 product CANON S10 0x3041 PowerShot S10 product CANON S100 0x3045 PowerShot S100 product CANON S200 0x3065 PowerShot S200 product CANON REBELXT 0x30ef Digital Rebel XT /* CATC products */ product CATC NETMATE 0x000a Netmate Ethernet product CATC NETMATE2 0x000c Netmate2 Ethernet product CATC CHIEF 0x000d USB Chief Bus & Protocol Analyzer product CATC ANDROMEDA 0x1237 Andromeda hub /* CASIO products */ product CASIO QV_DIGICAM 0x1001 QV DigiCam product CASIO EXS880 0x1105 Exilim EX-S880 product CASIO BE300 0x2002 BE-300 PDA product CASIO NAMELAND 0x4001 CASIO Nameland EZ-USB /* CCYU products */ product CCYU ED1064 0x2136 EasyDisk ED1064 /* Century products */ product CENTURY EX35QUAT 0x011e Century USB Disk Enclosure product CENTURY EX35SW4_SB4 0x011f Century USB Disk Enclosure /* Cherry products */ product CHERRY MY3000KBD 0x0001 My3000 keyboard product CHERRY MY3000HUB 0x0003 My3000 hub product CHERRY CYBOARD 0x0004 CyBoard Keyboard /* Chic Technology products */ product CHIC MOUSE1 0x0001 mouse product CHIC CYPRESS 0x0003 Cypress USB Mouse /* Chicony products */ product CHICONY KB8933 0x0001 KB-8933 keyboard product CHICONY KU0325 0x0116 KU-0325 keyboard product CHICONY CNF7129 0xb071 Notebook Web Camera product CHICONY HDUVCCAM 0xb40a HD UVC WebCam product CHICONY RTL8188CUS_1 0xaff7 RTL8188CUS product CHICONY RTL8188CUS_2 0xaff8 RTL8188CUS product CHICONY RTL8188CUS_3 0xaff9 RTL8188CUS product CHICONY RTL8188CUS_4 0xaffa RTL8188CUS product CHICONY RTL8188CUS_5 0xaffa RTL8188CUS product CHICONY2 TWINKLECAM 0x600d TwinkleCam USB camera /* CH Products */ product CHPRODUCTS PROTHROTTLE 0x00f1 Pro Throttle product CHPRODUCTS PROPEDALS 0x00f2 Pro Pedals product CHPRODUCTS FIGHTERSTICK 0x00f3 Fighterstick product CHPRODUCTS FLIGHTYOKE 0x00ff Flight Sim Yoke /* Cisco-Linksys products */ product CISCOLINKSYS WUSB54AG 0x000c WUSB54AG Wireless Adapter product CISCOLINKSYS WUSB54G 0x000d WUSB54G Wireless Adapter product CISCOLINKSYS WUSB54GP 0x0011 WUSB54GP Wireless Adapter product CISCOLINKSYS USB200MV2 0x0018 USB200M v2 product CISCOLINKSYS HU200TS 0x001a HU200TS Wireless Adapter product CISCOLINKSYS WUSB54GC 0x0020 WUSB54GC product CISCOLINKSYS WUSB54GR 0x0023 WUSB54GR product CISCOLINKSYS WUSBF54G 0x0024 WUSBF54G product CISCOLINKSYS AE1000 0x002f AE1000 product CISCOLINKSYS USB3GIGV1 0x0041 USB3GIGV1 USB Ethernet Adapter product CISCOLINKSYS2 RT3070 0x4001 RT3070 product CISCOLINKSYS3 RT3070 0x0101 RT3070 /* Clipsal products */ product CLIPSAL 560884 0x0101 560884 C-Bus Audio Matrix Switch product CLIPSAL 5500PACA 0x0201 5500PACA C-Bus Pascal Automation Controller product CLIPSAL 5800PC 0x0301 5800PC C-Bus Wireless Interface product CLIPSAL 5500PCU 0x0303 5500PCU C-Bus Interface product CLIPSAL 5000CT2 0x0304 5000CT2 C-Bus Touch Screen product CLIPSAL C5000CT2 0x0305 C5000CT2 C-Bus Touch Screen product CLIPSAL L51xx 0x0401 L51xx C-Bus Dimmer /* CMOTECH products */ product CMOTECH CNU510 0x5141 CDMA Technologies USB modem product CMOTECH CNU550 0x5543 CDMA 2000 1xRTT/1xEVDO USB modem product CMOTECH CGU628 0x6006 CGU-628 product CMOTECH CDMA_MODEM1 0x6280 CDMA Technologies USB modem product CMOTECH DISK 0xf000 disk mode /* Compaq products */ product COMPAQ IPAQPOCKETPC 0x0003 iPAQ PocketPC product COMPAQ PJB100 0x504a Personal 
Jukebox PJB100 product COMPAQ IPAQLINUX 0x505a iPAQ Linux /* Composite Corp products looks the same as "TANGTOP" */ product COMPOSITE USBPS2 0x0001 USB to PS2 Adaptor /* Conceptronic products */ product CONCEPTRONIC PRISM_GT 0x3762 PrismGT USB 2.0 WLAN product CONCEPTRONIC C11U 0x7100 C11U product CONCEPTRONIC WL210 0x7110 WL-210 product CONCEPTRONIC AR5523_1 0x7801 AR5523 product CONCEPTRONIC AR5523_1_NF 0x7802 AR5523 (no firmware) product CONCEPTRONIC AR5523_2 0x7811 AR5523 product CONCEPTRONIC AR5523_2_NF 0x7812 AR5523 (no firmware) product CONCEPTRONIC2 RTL8192SU_1 0x3300 RTL8192SU product CONCEPTRONIC2 RTL8192SU_2 0x3301 RTL8192SU product CONCEPTRONIC2 RTL8192SU_3 0x3302 RTL8192SU product CONCEPTRONIC2 C54RU 0x3c02 C54RU WLAN product CONCEPTRONIC2 C54RU2 0x3c22 C54RU product CONCEPTRONIC2 RT3070_1 0x3c08 RT3070 product CONCEPTRONIC2 RT3070_2 0x3c11 RT3070 product CONCEPTRONIC2 VIGORN61 0x3c25 VIGORN61 product CONCEPTRONIC2 RT2870_1 0x3c06 RT2870 product CONCEPTRONIC2 RT2870_2 0x3c07 RT2870 product CONCEPTRONIC2 RT2870_7 0x3c09 RT2870 product CONCEPTRONIC2 RT2870_8 0x3c12 RT2870 product CONCEPTRONIC2 RT2870_3 0x3c23 RT2870 product CONCEPTRONIC2 RT2870_4 0x3c25 RT2870 product CONCEPTRONIC2 RT2870_5 0x3c27 RT2870 product CONCEPTRONIC2 RT2870_6 0x3c28 RT2870 /* Connectix products */ product CONNECTIX QUICKCAM 0x0001 QuickCam /* Contec products */ product CONTEC COM1USBH 0x8311 FTDI compatible adapter /* Corega products */ product COREGA ETHER_USB_T 0x0001 Ether USB-T product COREGA FETHER_USB_TX 0x0004 FEther USB-TX product COREGA WLAN_USB_USB_11 0x000c WirelessLAN USB-11 product COREGA FETHER_USB_TXS 0x000d FEther USB-TXS product COREGA WLANUSB 0x0012 Wireless LAN Stick-11 product COREGA FETHER_USB2_TX 0x0017 FEther USB2-TX product COREGA WLUSB_11_KEY 0x001a ULUSB-11 Key product COREGA CGUSBRS232R 0x002a CG-USBRS232R product COREGA CGWLUSB2GL 0x002d CG-WLUSB2GL product COREGA CGWLUSB2GPX 0x002e CG-WLUSB2GPX product COREGA RT2870_1 0x002f RT2870 product COREGA RT2870_2 0x003c RT2870 product COREGA RT2870_3 0x003f RT2870 product COREGA RT3070 0x0041 RT3070 product COREGA CGWLUSB300GNM 0x0042 CG-WLUSB300GNM product COREGA RTL8192SU 0x0047 RTL8192SU product COREGA RTL8192CU 0x0056 RTL8192CU product COREGA WLUSB_11_STICK 0x7613 WLAN USB Stick 11 product COREGA FETHER_USB_TXC 0x9601 FEther USB-TXC /* Corsair products */ product CORSAIR K60 0x0a60 Corsair Vengeance K60 keyboard product CORSAIR K70 0x1b09 Corsair Vengeance K70 keyboard /* Creative products */ product CREATIVE NOMAD_II 0x1002 Nomad II MP3 player product CREATIVE NOMAD_IIMG 0x4004 Nomad II MG product CREATIVE NOMAD 0x4106 Nomad product CREATIVE2 VOIP_BLASTER 0x0258 Voip Blaster product CREATIVE3 OPTICAL_MOUSE 0x0001 Notebook Optical Mouse /* Cambridge Silicon Radio Ltd. 
products */ product CSR BT_DONGLE 0x0001 Bluetooth USB dongle product CSR CSRDFU 0xffff USB Bluetooth Device in DFU State /* Chipsbank Microelectronics Co., Ltd */ product CHIPSBANK USBMEMSTICK 0x6025 CBM2080 Flash drive controller product CHIPSBANK USBMEMSTICK1 0x6026 CBM1180 Flash drive controller /* CTX products */ product CTX EX1300 0x9999 Ex1300 hub /* Curitel products */ product CURITEL HX550C 0x1101 CDMA 2000 1xRTT USB modem (HX-550C) product CURITEL HX57XB 0x2101 CDMA 2000 1xRTT USB modem (HX-570/575B/PR-600) product CURITEL PC5740 0x3701 Broadband Wireless modem product CURITEL UM150 0x3711 EVDO modem product CURITEL UM175 0x3714 EVDO modem /* CyberPower products */ product CYBERPOWER 1500CAVRLCD 0x0501 1500CAVRLCD /* CyberTAN Technology products */ product CYBERTAN TG54USB 0x1666 TG54USB product CYBERTAN RT2870 0x1828 RT2870 /* Cypress Semiconductor products */ product CYPRESS MOUSE 0x0001 mouse product CYPRESS THERMO 0x0002 thermometer product CYPRESS WISPY1A 0x0bad MetaGeek Wi-Spy product CYPRESS KBDHUB 0x0101 Keyboard/Hub product CYPRESS FMRADIO 0x1002 FM Radio product CYPRESS IKARILASER 0x121f Ikari Laser SteelSeries ApS product CYPRESS USBRS232 0x5500 USB-RS232 Interface product CYPRESS SLIM_HUB 0x6560 Slim Hub product CYPRESS XX6830XX 0x6830 PATA Storage Device product CYPRESS SILVERSHIELD 0xfd13 Gembird Silver Shield PM /* Daisy Technology products */ product DAISY DMC 0x6901 USB MultiMedia Reader /* Dallas Semiconductor products */ product DALLAS J6502 0x4201 J-6502 speakers /* DataApex products */ product DATAAPEX MULTICOM 0xead6 MultiCom /* Dell products */ product DELL PORT 0x0058 Port Replicator product DELL AIO926 0x5115 Photo AIO Printer 926 product DELL BC02 0x8000 BC02 Bluetooth USB Adapter product DELL PRISM_GT_1 0x8102 PrismGT USB 2.0 WLAN product DELL TM350 0x8103 TrueMobile 350 Bluetooth USB Adapter product DELL PRISM_GT_2 0x8104 PrismGT USB 2.0 WLAN product DELL U5700 0x8114 Dell 5700 3G product DELL U5500 0x8115 Dell 5500 3G product DELL U5505 0x8116 Dell 5505 3G product DELL U5700_2 0x8117 Dell 5700 3G product DELL U5510 0x8118 Dell 5510 3G product DELL U5700_3 0x8128 Dell 5700 3G product DELL U5700_4 0x8129 Dell 5700 3G product DELL U5720 0x8133 Dell 5720 3G product DELL U5720_2 0x8134 Dell 5720 3G product DELL U740 0x8135 Dell U740 CDMA product DELL U5520 0x8136 Dell 5520 3G product DELL U5520_2 0x8137 Dell 5520 3G product DELL U5520_3 0x8138 Dell 5520 3G product DELL U5730 0x8180 Dell 5730 3G product DELL U5730_2 0x8181 Dell 5730 3G product DELL U5730_3 0x8182 Dell 5730 3G product DELL DW700 0x9500 Dell DW700 GPS /* Delorme Publishing products */ product DELORME EARTHMATE 0x0100 Earthmate GPS /* Desknote products */ product DESKNOTE UCR_61S2B 0x0c55 UCR-61S2B /* Diamond products */ product DIAMOND RIO500USB 0x0001 Rio 500 USB /* Dick Smith Electronics (really C-Net) products */ product DICKSMITH RT2573 0x9022 RT2573 product DICKSMITH CWD854F 0x9032 C-Net CWD-854 rev F /* Digi International products */ product DIGI ACCELEPORT2 0x0002 AccelePort USB 2 product DIGI ACCELEPORT4 0x0004 AccelePort USB 4 product DIGI ACCELEPORT8 0x0008 AccelePort USB 8 /* Digianswer A/S products */ product DIGIANSWER ZIGBEE802154 0x000a ZigBee/802.15.4 MAC /* D-Link products */ /*product DLINK DSBS25 0x0100 DSB-S25 serial*/ product DLINK DUBE100 0x1a00 10/100 Ethernet product DLINK DUBE100C1 0x1a02 DUB-E100 rev C1 product DLINK DSB650TX4 0x200c 10/100 Ethernet product DLINK DWL120E 0x3200 DWL-120 rev E product DLINK DWA125D1 0x330f DWA-125 rev D1 product DLINK DWA123D1 0x3310 
DWA-123 rev D1 product DLINK DWL122 0x3700 DWL-122 product DLINK DWLG120 0x3701 DWL-G120 product DLINK DWL120F 0x3702 DWL-120 rev F product DLINK DWLAG132 0x3a00 DWL-AG132 product DLINK DWLAG132_NF 0x3a01 DWL-AG132 (no firmware) product DLINK DWLG132 0x3a02 DWL-G132 product DLINK DWLG132_NF 0x3a03 DWL-G132 (no firmware) product DLINK DWLAG122 0x3a04 DWL-AG122 product DLINK DWLAG122_NF 0x3a05 DWL-AG122 (no firmware) product DLINK DWLG122 0x3c00 DWL-G122 b1 Wireless Adapter product DLINK DUBE100B1 0x3c05 DUB-E100 rev B1 product DLINK RT2870 0x3c09 RT2870 product DLINK RT3072 0x3c0a RT3072 product DLINK DWA140B3 0x3c15 DWA-140 rev B3 product DLINK DWA160B2 0x3c1a DWA-160 rev B2 product DLINK DWA127 0x3c1b DWA-127 Wireless Adapter product DLINK DWA162 0x3c1f DWA-162 Wireless Adapter product DLINK DWA140D1 0x3c20 DWA-140 rev D1 product DLINK DSB650C 0x4000 10Mbps Ethernet product DLINK DSB650TX1 0x4001 10/100 Ethernet product DLINK DSB650TX 0x4002 10/100 Ethernet product DLINK DSB650TX_PNA 0x4003 1/10/100 Ethernet product DLINK DSB650TX3 0x400b 10/100 Ethernet product DLINK DSB650TX2 0x4102 10/100 Ethernet product DLINK DUB1312 0x4a00 10/100/1000 Ethernet product DLINK DSB650 0xabc1 10/100 Ethernet product DLINK DUBH7 0xf103 DUB-H7 USB 2.0 7-Port Hub product DLINK DWR510_CD 0xa805 DWR-510 CD-ROM Mode product DLINK DWR510 0x7e12 DWR-510 product DLINK DWM157 0x7d02 DWM-157 product DLINK DWM157_CD 0xa707 DWM-157 CD-ROM Mode product DLINK RTL8188CU 0x3308 RTL8188CU product DLINK RTL8192CU_1 0x3307 RTL8192CU product DLINK RTL8192CU_2 0x3309 RTL8192CU product DLINK RTL8192CU_3 0x330a RTL8192CU product DLINK DWA131B 0x330d DWA-131 rev B product DLINK2 RTL8192SU_1 0x3300 RTL8192SU product DLINK2 RTL8192SU_2 0x3302 RTL8192SU product DLINK2 DWA131A1 0x3303 DWA-131 A1 product DLINK2 DWA160A2 0x3a09 DWA-160 A2 product DLINK2 DWA120 0x3a0c DWA-120 product DLINK2 DWA120_NF 0x3a0d DWA-120 (no firmware) product DLINK2 DWA130D1 0x3a0f DWA-130 D1 product DLINK2 DWLG122C1 0x3c03 DWL-G122 c1 product DLINK2 WUA1340 0x3c04 WUA-1340 product DLINK2 DWA111 0x3c06 DWA-111 product DLINK2 RT2870_1 0x3c09 RT2870 product DLINK2 DWA110 0x3c07 DWA-110 product DLINK2 RT3072 0x3c0a RT3072 product DLINK2 RT3072_1 0x3c0b RT3072 product DLINK2 RT3070_1 0x3c0d RT3070 product DLINK2 RT3070_2 0x3c0e RT3070 product DLINK2 RT3070_3 0x3c0f RT3070 product DLINK2 DWA160A1 0x3c10 DWA-160 A1 product DLINK2 RT2870_2 0x3c11 RT2870 product DLINK2 DWA130 0x3c13 DWA-130 product DLINK2 RT3070_4 0x3c15 RT3070 product DLINK2 RT3070_5 0x3c16 RT3070 product DLINK3 DWM652 0x3e04 DWM-652 /* DisplayLink products */ product DISPLAYLINK LCD4300U 0x01ba LCD-4300U product DISPLAYLINK LCD8000U 0x01bb LCD-8000U product DISPLAYLINK LD220 0x0100 Samsung LD220 product DISPLAYLINK GUC2020 0x0059 IOGEAR DVI GUC2020 product DISPLAYLINK VCUD60 0x0136 Rextron DVI product DISPLAYLINK CONV 0x0138 StarTech CONV-USB2DVI product DISPLAYLINK DLDVI 0x0141 DisplayLink DVI product DISPLAYLINK VGA10 0x015a CMP-USBVGA10 product DISPLAYLINK WSDVI 0x0198 WS Tech DVI product DISPLAYLINK EC008 0x019b EasyCAP008 DVI product DISPLAYLINK HPDOCK 0x01d4 HP USB Docking product DISPLAYLINK NL571 0x01d7 HP USB DVI product DISPLAYLINK M01061 0x01e2 Lenovo DVI product DISPLAYLINK SWDVI 0x024c SUNWEIT DVI product DISPLAYLINK NBDOCK 0x0215 VideoHome NBdock1920 product DISPLAYLINK LUM70 0x02a9 Lilliput UM-70 product DISPLAYLINK UM7X0 0x401a nanovision MiMo product DISPLAYLINK LT1421 0x03e0 Lenovo ThinkVision LT1421 product DISPLAYLINK POLARIS2 0x0117 Polaris2 USB dock product DISPLAYLINK 
PLUGABLE 0x0377 Plugable docking station /* DMI products */ product DMI CFSM_RW 0xa109 CF/SM Reader/Writer product DMI DISK 0x2bcf Generic Disk /* DrayTek products */ product DRAYTEK VIGOR550 0x0550 Vigor550 /* Dream Link products */ product DREAMLINK DL100B 0x0004 USB Webmail Notifier /* dresden elektronik products */ product DRESDENELEKTRONIK SENSORTERMINALBOARD 0x0001 SensorTerminalBoard product DRESDENELEKTRONIK WIRELESSHANDHELDTERMINAL 0x0004 Wireless Handheld Terminal product DRESDENELEKTRONIK DE_RFNODE 0x001c deRFnode product DRESDENELEKTRONIK LEVELSHIFTERSTICKLOWCOST 0x0022 Levelshifter Stick Low Cost /* DYMO */ product DYMO LABELMANAGERPNP 0x1001 DYMO LabelManager PnP /* Dynastream Innovations */ product DYNASTREAM ANTDEVBOARD 0x1003 ANT dev board product DYNASTREAM ANT2USB 0x1004 ANT2USB product DYNASTREAM ANTDEVBOARD2 0x1006 ANT dev board /* Edimax products */ product EDIMAX EW7318USG 0x7318 USB Wireless dongle product EDIMAX RTL8192SU_1 0x7611 RTL8192SU product EDIMAX RTL8192SU_2 0x7612 RTL8192SU product EDIMAX EW7622UMN 0x7622 EW-7622UMn product EDIMAX RT2870_1 0x7711 RT2870 product EDIMAX EW7717 0x7717 EW-7717 product EDIMAX EW7718 0x7718 EW-7718 product EDIMAX EW7733UND 0x7733 EW-7733UnD product EDIMAX EW7811UN 0x7811 EW-7811Un product EDIMAX RTL8192CU 0x7822 RTL8192CU /* eGalax Products */ product EGALAX TPANEL 0x0001 Touch Panel product EGALAX TPANEL2 0x0002 Touch Panel product EGALAX2 TPANEL 0x0001 Touch Panel /* EGO Products */ product EGO DUMMY 0x0000 Dummy Product product EGO M4U 0x1020 ESI M4U /* Eicon Networks */ product EICON DIVA852 0x4905 Diva 852 ISDN TA /* EIZO products */ product EIZO HUB 0x0000 hub product EIZO MONITOR 0x0001 monitor /* ELCON Systemtechnik products */ product ELCON PLAN 0x0002 Goldpfeil P-LAN /* Elecom products */ product ELECOM MOUSE29UO 0x0002 mouse 29UO product ELECOM LDUSBTX0 0x200c LD-USB/TX product ELECOM LDUSBTX1 0x4002 LD-USB/TX product ELECOM LDUSBLTX 0x4005 LD-USBL/TX product ELECOM WDC150SU2M 0x4008 WDC-150SU2M product ELECOM LDUSBTX2 0x400b LD-USB/TX product ELECOM LDUSB20 0x4010 LD-USB20 product ELECOM UCSGT 0x5003 UC-SGT product ELECOM UCSGT0 0x5004 UC-SGT product ELECOM LDUSBTX3 0xabc1 LD-USB/TX /* Elektor products */ product ELEKTOR FT323R 0x0005 FTDI compatible adapter /* Elsa products */ product ELSA MODEM1 0x2265 ELSA Modem Board product ELSA USB2ETHERNET 0x3000 Microlink USB2Ethernet /* ELV products */ product ELV USBI2C 0xe00f USB-I2C interface /* EMS products */ product EMS DUAL_SHOOTER 0x0003 PSX gun controller converter /* Emtec products */ product EMTEC RUF2PS 0x2240 Flash Drive /* Encore products */ product ENCORE RT3070_1 0x1480 RT3070 product ENCORE RT3070_2 0x14a1 RT3070 product ENCORE RT3070_3 0x14a9 RT3070 /* Entrega products */ product ENTREGA 1S 0x0001 1S serial product ENTREGA 2S 0x0002 2S serial product ENTREGA 1S25 0x0003 1S25 serial product ENTREGA 4S 0x0004 4S serial product ENTREGA E45 0x0005 E45 Ethernet product ENTREGA CENTRONICS 0x0006 Parallel Port product ENTREGA XX1 0x0008 Ethernet product ENTREGA 1S9 0x0093 1S9 serial product ENTREGA EZUSB 0x8000 EZ-USB /*product ENTREGA SERIAL 0x8001 DB25 Serial*/ product ENTREGA 2U4S 0x8004 2U4S serial/usb hub product ENTREGA XX2 0x8005 Ethernet /*product ENTREGA SERIAL_DB9 0x8093 DB9 Serial*/ /* Epson products */ product EPSON PRINTER1 0x0001 USB Printer product EPSON PRINTER2 0x0002 ISD USB Smart Cable for Mac product EPSON PRINTER3 0x0003 ISD USB Smart Cable product EPSON PRINTER5 0x0005 USB Printer product EPSON 636 0x0101 Perfection 636U / 636Photo scanner 
product EPSON 610 0x0103 Perfection 610 scanner product EPSON 1200 0x0104 Perfection 1200U / 1200Photo scanner product EPSON 1600 0x0107 Expression 1600 scanner product EPSON 1640 0x010a Perfection 1640SU scanner product EPSON 1240 0x010b Perfection 1240U / 1240Photo scanner product EPSON 640U 0x010c Perfection 640U scanner product EPSON 1250 0x010f Perfection 1250U / 1250Photo scanner product EPSON 1650 0x0110 Perfection 1650 scanner product EPSON GT9700F 0x0112 GT-9700F scanner product EPSON GT9300UF 0x011b GT-9300UF scanner product EPSON 3200 0x011c Perfection 3200 scanner product EPSON 1260 0x011d Perfection 1260 scanner product EPSON 1660 0x011e Perfection 1660 scanner product EPSON 1670 0x011f Perfection 1670 scanner product EPSON 1270 0x0120 Perfection 1270 scanner product EPSON 2480 0x0121 Perfection 2480 scanner product EPSON 3590 0x0122 Perfection 3590 scanner product EPSON 4990 0x012a Perfection 4990 Photo scanner product EPSON CRESSI_EDY 0x0521 Cressi Edy diving computer product EPSON N2ITION3 0x0522 Zeagle N2iTion3 diving computer product EPSON STYLUS_875DC 0x0601 Stylus Photo 875DC Card Reader product EPSON STYLUS_895 0x0602 Stylus Photo 895 Card Reader product EPSON CX5400 0x0808 CX5400 scanner product EPSON 3500 0x080e CX-3500/3600/3650 MFP product EPSON RX425 0x080f Stylus Photo RX425 scanner product EPSON DX3800 0x0818 CX3700/CX3800/DX38x0 MFP scanner product EPSON 4800 0x0819 CX4700/CX4800/DX48x0 MFP scanner product EPSON 4200 0x0820 CX4100/CX4200/DX4200 MFP scanner product EPSON 5000 0x082b CX4900/CX5000/DX50x0 MFP scanner product EPSON 6000 0x082e CX5900/CX6000/DX60x0 MFP scanner product EPSON DX4000 0x082f DX4000 MFP scanner product EPSON DX7400 0x0838 CX7300/CX7400/DX7400 MFP scanner product EPSON DX8400 0x0839 CX8300/CX8400/DX8400 MFP scanner product EPSON SX100 0x0841 SX100/NX100 MFP scanner product EPSON NX300 0x0848 NX300 MFP scanner product EPSON SX200 0x0849 SX200/SX205 MFP scanner product EPSON SX400 0x084a SX400/NX400/TX400 MFP scanner /* e-TEK Labs products */ product ETEK 1COM 0x8007 Serial /* Evolution products */ product EVOLUTION ER1 0x0300 FTDI compatible adapter product EVOLUTION HYBRID 0x0302 FTDI compatible adapter product EVOLUTION RCM4 0x0303 FTDI compatible adapter /* Extended Systems products */ product EXTENDED XTNDACCESS 0x0100 XTNDAccess IrDA /* Falcom products */ product FALCOM TWIST 0x0001 USB GSM/GPRS Modem product FALCOM SAMBA 0x0005 FTDI compatible adapter /* FEIYA products */ product FEIYA DUMMY 0x0000 Dummy product product FEIYA 5IN1 0x1132 5-in-1 Card Reader product FEIYA ELANGO 0x6200 MicroSDHC Card Reader product FEIYA AC110 0x6300 AC-110 Card Reader /* FeiXun Communication products */ product FEIXUN RTL8188CU 0x0090 RTL8188CU product FEIXUN RTL8192CU 0x0091 RTL8192CU /* Festo */ product FESTO CPX_USB 0x0102 CPX-USB product FESTO CMSP 0x0501 CMSP /* Fiberline */ product FIBERLINE WL430U 0x6003 WL-430U /* FIC / OpenMoko */ product FIC NEO1973_DEBUG 0x5118 FTDI compatible adapter /* Fossil, Inc products */ product FOSSIL WRISTPDA 0x0002 Wrist PDA /* Foxconn products */ product FOXCONN TCOM_TC_300 0xe000 T-Com TC 300 product FOXCONN PIRELLI_DP_L10 0xe003 Pirelli DP-L10 /* Freecom products */ product FREECOM DVD 0xfc01 DVD drive product FREECOM HDD 0xfc05 Classic SL Hard Drive /* Fujitsu Siemens Computers products */ product FSC E5400 0x1009 PrismGT USB 2.0 WLAN /* Future Technology Devices products */ product FTDI SCX8_USB_PHOENIX 0x5259 SCx8 USB Phoenix interface product FTDI SERIAL_8U100AX 0x8372 8U100AX Serial product FTDI 
SERIAL_8U232AM 0x6001 8U232AM Serial product FTDI SERIAL_8U232AM4 0x6004 8U232AM Serial product FTDI SERIAL_232RL 0x6006 FT232RL Serial product FTDI SERIAL_2232C 0x6010 FT2232C Dual port Serial product FTDI 232H 0x6014 FTDI compatible adapter product FTDI 232EX 0x6015 FTDI compatible adapter product FTDI SERIAL_2232D 0x9e90 FT2232D Dual port Serial product FTDI SERIAL_4232H 0x6011 FT4232H Quad port Serial product FTDI XDS100V2 0xa6d0 TI XDS100V1/V2 and early Beaglebones product FTDI XDS100V3 0xa6d1 TI XDS100V3 product FTDI KTLINK 0xbbe2 KT-LINK Embedded Hackers Multitool product FTDI TURTELIZER2 0xbdc8 egnite Turtelizer 2 JTAG/RS232 Adapter /* Gude Analog- und Digitalsysteme products also uses FTDI's id: */ product FTDI TACTRIX_OPENPORT_13M 0xcc48 OpenPort 1.3 Mitsubishi product FTDI TACTRIX_OPENPORT_13S 0xcc49 OpenPort 1.3 Subaru product FTDI TACTRIX_OPENPORT_13U 0xcc4a OpenPort 1.3 Universal product FTDI GAMMASCOUT 0xd678 Gamma-Scout product FTDI KBS 0xe6c8 Pyramid KBS USB LCD product FTDI EISCOU 0xe888 Expert ISDN Control USB product FTDI UOPTBR 0xe889 USB-RS232 OptoBridge product FTDI EMCU2D 0xe88a Expert mouseCLOCK USB II product FTDI PCMSFU 0xe88b Precision Clock MSF USB product FTDI EMCU2H 0xe88c Expert mouseCLOCK USB II HBG product FTDI MAXSTREAM 0xee18 Maxstream PKG-U product FTDI USB_UIRT 0xf850 USB-UIRT product FTDI USBSERIAL 0xfa00 Matrix Orbital USB Serial product FTDI MX2_3 0xfa01 Matrix Orbital MX2 or MX3 product FTDI MX4_5 0xfa02 Matrix Orbital MX4 or MX5 product FTDI LK202 0xfa03 Matrix Orbital VK/LK202 Family product FTDI LK204 0xfa04 Matrix Orbital VK/LK204 Family product FTDI CFA_632 0xfc08 Crystalfontz CFA-632 USB LCD product FTDI CFA_634 0xfc09 Crystalfontz CFA-634 USB LCD product FTDI CFA_633 0xfc0b Crystalfontz CFA-633 USB LCD product FTDI CFA_631 0xfc0c Crystalfontz CFA-631 USB LCD product FTDI CFA_635 0xfc0d Crystalfontz CFA-635 USB LCD product FTDI SEMC_DSS20 0xfc82 SEMC DSS-20 SyncStation /* Commerzielle und Technische Informationssysteme GmbH products */ product FTDI CTI_USB_NANO_485 0xf60b CTI USB-Nano 485 product FTDI CTI_USB_MINI_485 0xf608 CTI USB-Mini 485 /* Other products */ product FTDI 232RL 0xfbfa FTDI compatible adapter product FTDI 4N_GALAXY_DE_1 0xf3c0 FTDI compatible adapter product FTDI 4N_GALAXY_DE_2 0xf3c1 FTDI compatible adapter product FTDI 4N_GALAXY_DE_3 0xf3c2 FTDI compatible adapter product FTDI 8U232AM_ALT 0x6006 FTDI compatible adapter product FTDI ACCESSO 0xfad0 FTDI compatible adapter product FTDI ACG_HFDUAL 0xdd20 FTDI compatible adapter product FTDI ACTIVE_ROBOTS 0xe548 FTDI compatible adapter product FTDI ACTZWAVE 0xf2d0 FTDI compatible adapter product FTDI AMC232 0xff00 FTDI compatible adapter product FTDI ARTEMIS 0xdf28 FTDI compatible adapter product FTDI ASK_RDR400 0xc991 FTDI compatible adapter product FTDI ATIK_ATK16 0xdf30 FTDI compatible adapter product FTDI ATIK_ATK16C 0xdf32 FTDI compatible adapter product FTDI ATIK_ATK16HR 0xdf31 FTDI compatible adapter product FTDI ATIK_ATK16HRC 0xdf33 FTDI compatible adapter product FTDI ATIK_ATK16IC 0xdf35 FTDI compatible adapter product FTDI BCS_SE923 0xfb99 FTDI compatible adapter product FTDI CANDAPTER 0x9f80 FTDI compatible adapter product FTDI CANUSB 0xffa8 FTDI compatible adapter product FTDI CCSICDU20_0 0xf9d0 FTDI compatible adapter product FTDI CCSICDU40_1 0xf9d1 FTDI compatible adapter product FTDI CCSICDU64_4 0xf9d4 FTDI compatible adapter product FTDI CCSLOAD_N_GO_3 0xf9d3 FTDI compatible adapter product FTDI CCSMACHX_2 0xf9d2 FTDI compatible adapter product FTDI CCSPRIME8_5 
0xf9d5 FTDI compatible adapter product FTDI CHAMSYS_24_MASTER_WING 0xdaf8 FTDI compatible adapter product FTDI CHAMSYS_MAXI_WING 0xdafd FTDI compatible adapter product FTDI CHAMSYS_MEDIA_WING 0xdafe FTDI compatible adapter product FTDI CHAMSYS_MIDI_TIMECODE 0xdafb FTDI compatible adapter product FTDI CHAMSYS_MINI_WING 0xdafc FTDI compatible adapter product FTDI CHAMSYS_PC_WING 0xdaf9 FTDI compatible adapter product FTDI CHAMSYS_USB_DMX 0xdafa FTDI compatible adapter product FTDI CHAMSYS_WING 0xdaff FTDI compatible adapter product FTDI COM4SM 0xd578 FTDI compatible adapter product FTDI CONVERTER_0 0xd388 FTDI compatible adapter product FTDI CONVERTER_1 0xd389 FTDI compatible adapter product FTDI CONVERTER_2 0xd38a FTDI compatible adapter product FTDI CONVERTER_3 0xd38b FTDI compatible adapter product FTDI CONVERTER_4 0xd38c FTDI compatible adapter product FTDI CONVERTER_5 0xd38d FTDI compatible adapter product FTDI CONVERTER_6 0xd38e FTDI compatible adapter product FTDI CONVERTER_7 0xd38f FTDI compatible adapter product FTDI DMX4ALL 0xc850 FTDI compatible adapter product FTDI DOMINTELL_DGQG 0xef50 FTDI compatible adapter product FTDI DOMINTELL_DUSB 0xef51 FTDI compatible adapter product FTDI DOTEC 0x9868 FTDI compatible adapter product FTDI ECLO_COM_1WIRE 0xea90 FTDI compatible adapter product FTDI ECO_PRO_CDS 0xe520 FTDI compatible adapter product FTDI ELSTER_UNICOM 0xe700 FTDI compatible adapter product FTDI ELV_ALC8500 0xf06e FTDI compatible adapter product FTDI ELV_CLI7000 0xfb59 FTDI compatible adapter product FTDI ELV_CSI8 0xe0f0 FTDI compatible adapter product FTDI ELV_EC3000 0xe006 FTDI compatible adapter product FTDI ELV_EM1000DL 0xe0f1 FTDI compatible adapter product FTDI ELV_EM1010PC 0xe0ef FTDI compatible adapter product FTDI ELV_FEM 0xe00a FTDI compatible adapter product FTDI ELV_FHZ1000PC 0xf06f FTDI compatible adapter product FTDI ELV_FHZ1300PC 0xe0e8 FTDI compatible adapter product FTDI ELV_FM3RX 0xe0ed FTDI compatible adapter product FTDI ELV_FS20SIG 0xe0f4 FTDI compatible adapter product FTDI ELV_HS485 0xe0ea FTDI compatible adapter product FTDI ELV_KL100 0xe002 FTDI compatible adapter product FTDI ELV_MSM1 0xe001 FTDI compatible adapter product FTDI ELV_PCD200 0xf06c FTDI compatible adapter product FTDI ELV_PCK100 0xe0f2 FTDI compatible adapter product FTDI ELV_PPS7330 0xfb5c FTDI compatible adapter product FTDI ELV_RFP500 0xe0f3 FTDI compatible adapter product FTDI ELV_T1100 0xf06b FTDI compatible adapter product FTDI ELV_TFD128 0xe0ec FTDI compatible adapter product FTDI ELV_TFM100 0xfb5d FTDI compatible adapter product FTDI ELV_TWS550 0xe009 FTDI compatible adapter product FTDI ELV_UAD8 0xf068 FTDI compatible adapter product FTDI ELV_UDA7 0xf069 FTDI compatible adapter product FTDI ELV_UDF77 0xfb5e FTDI compatible adapter product FTDI ELV_UIO88 0xfb5f FTDI compatible adapter product FTDI ELV_ULA200 0xf06d FTDI compatible adapter product FTDI ELV_UM100 0xfb5a FTDI compatible adapter product FTDI ELV_UMS100 0xe0eb FTDI compatible adapter product FTDI ELV_UO100 0xfb5b FTDI compatible adapter product FTDI ELV_UR100 0xfb58 FTDI compatible adapter product FTDI ELV_USI2 0xf06a FTDI compatible adapter product FTDI ELV_USR 0xe000 FTDI compatible adapter product FTDI ELV_UTP8 0xe0f5 FTDI compatible adapter product FTDI ELV_WS300PC 0xe0f6 FTDI compatible adapter product FTDI ELV_WS444PC 0xe0f7 FTDI compatible adapter product FTDI ELV_WS500 0xe0e9 FTDI compatible adapter product FTDI ELV_WS550 0xe004 FTDI compatible adapter product FTDI ELV_WS777 0xe0ee FTDI compatible adapter 
product FTDI ELV_WS888 0xe008 FTDI compatible adapter product FTDI FUTURE_0 0xf44a FTDI compatible adapter product FTDI FUTURE_1 0xf44b FTDI compatible adapter product FTDI FUTURE_2 0xf44c FTDI compatible adapter product FTDI GENERIC 0x9378 FTDI compatible adapter product FTDI GUDEADS_E808 0xe808 FTDI compatible adapter product FTDI GUDEADS_E809 0xe809 FTDI compatible adapter product FTDI GUDEADS_E80A 0xe80a FTDI compatible adapter product FTDI GUDEADS_E80B 0xe80b FTDI compatible adapter product FTDI GUDEADS_E80C 0xe80c FTDI compatible adapter product FTDI GUDEADS_E80D 0xe80d FTDI compatible adapter product FTDI GUDEADS_E80E 0xe80e FTDI compatible adapter product FTDI GUDEADS_E80F 0xe80f FTDI compatible adapter product FTDI GUDEADS_E88D 0xe88d FTDI compatible adapter product FTDI GUDEADS_E88E 0xe88e FTDI compatible adapter product FTDI GUDEADS_E88F 0xe88f FTDI compatible adapter product FTDI HD_RADIO 0x937c FTDI compatible adapter product FTDI HO720 0xed72 FTDI compatible adapter product FTDI HO730 0xed73 FTDI compatible adapter product FTDI HO820 0xed74 FTDI compatible adapter product FTDI HO870 0xed71 FTDI compatible adapter product FTDI IBS_APP70 0xff3d FTDI compatible adapter product FTDI IBS_PCMCIA 0xff3a FTDI compatible adapter product FTDI IBS_PEDO 0xff3e FTDI compatible adapter product FTDI IBS_PICPRO 0xff39 FTDI compatible adapter product FTDI IBS_PK1 0xff3b FTDI compatible adapter product FTDI IBS_PROD 0xff3f FTDI compatible adapter product FTDI IBS_RS232MON 0xff3c FTDI compatible adapter product FTDI IBS_US485 0xff38 FTDI compatible adapter product FTDI IPLUS 0xd070 FTDI compatible adapter product FTDI IPLUS2 0xd071 FTDI compatible adapter product FTDI IRTRANS 0xfc60 FTDI compatible adapter product FTDI LENZ_LIUSB 0xd780 FTDI compatible adapter product FTDI LM3S_DEVEL_BOARD 0xbcd8 FTDI compatible adapter product FTDI LM3S_EVAL_BOARD 0xbcd9 FTDI compatible adapter product FTDI LM3S_ICDI_B_BOARD 0xbcda FTDI compatible adapter product FTDI MASTERDEVEL2 0xf449 FTDI compatible adapter product FTDI MHAM_DB9 0xeeed FTDI compatible adapter product FTDI MHAM_IC 0xeeec FTDI compatible adapter product FTDI MHAM_KW 0xeee8 FTDI compatible adapter product FTDI MHAM_RS232 0xeeee FTDI compatible adapter product FTDI MHAM_Y6 0xeeea FTDI compatible adapter product FTDI MHAM_Y8 0xeeeb FTDI compatible adapter product FTDI MHAM_Y9 0xeeef FTDI compatible adapter product FTDI MHAM_YS 0xeee9 FTDI compatible adapter product FTDI MICRO_CHAMELEON 0xcaa0 FTDI compatible adapter product FTDI MTXORB_5 0xfa05 FTDI compatible adapter product FTDI MTXORB_6 0xfa06 FTDI compatible adapter product FTDI NXTCAM 0xabb8 FTDI compatible adapter product FTDI OCEANIC 0xf460 FTDI compatible adapter product FTDI OOCDLINK 0xbaf8 FTDI compatible adapter product FTDI OPENDCC 0xbfd8 FTDI compatible adapter product FTDI OPENDCC_GATEWAY 0xbfdb FTDI compatible adapter product FTDI OPENDCC_GBM 0xbfdc FTDI compatible adapter product FTDI OPENDCC_SNIFFER 0xbfd9 FTDI compatible adapter product FTDI OPENDCC_THROTTLE 0xbfda FTDI compatible adapter product FTDI PCDJ_DAC2 0xfa88 FTDI compatible adapter product FTDI PERLE_ULTRAPORT 0xf0c0 FTDI compatible adapter product FTDI PHI_FISCO 0xe40b FTDI compatible adapter product FTDI PIEGROUP 0xf208 FTDI compatible adapter product FTDI PROPOX_JTAGCABLEII 0xd738 FTDI compatible adapter product FTDI R2000KU_TRUE_RNG 0xfb80 FTDI compatible adapter product FTDI R2X0 0xfc71 FTDI compatible adapter product FTDI RELAIS 0xfa10 FTDI compatible adapter product FTDI REU_TINY 0xed22 FTDI compatible adapter 
product FTDI RMP200 0xe729 FTDI compatible adapter product FTDI RM_CANVIEW 0xfd60 FTDI compatible adapter product FTDI RRCIRKITS_LOCOBUFFER 0xc7d0 FTDI compatible adapter product FTDI SCIENCESCOPE_HS_LOGBOOK 0xff1d FTDI compatible adapter product FTDI SCIENCESCOPE_LOGBOOKML 0xff18 FTDI compatible adapter product FTDI SCIENCESCOPE_LS_LOGBOOK 0xff1c FTDI compatible adapter product FTDI SCS_DEVICE_0 0xd010 FTDI compatible adapter product FTDI SCS_DEVICE_1 0xd011 FTDI compatible adapter product FTDI SCS_DEVICE_2 0xd012 FTDI compatible adapter product FTDI SCS_DEVICE_3 0xd013 FTDI compatible adapter product FTDI SCS_DEVICE_4 0xd014 FTDI compatible adapter product FTDI SCS_DEVICE_5 0xd015 FTDI compatible adapter product FTDI SCS_DEVICE_6 0xd016 FTDI compatible adapter product FTDI SCS_DEVICE_7 0xd017 FTDI compatible adapter product FTDI SDMUSBQSS 0xf448 FTDI compatible adapter product FTDI SIGNALYZER_SH2 0xbca2 FTDI compatible adapter product FTDI SIGNALYZER_SH4 0xbca4 FTDI compatible adapter product FTDI SIGNALYZER_SLITE 0xbca1 FTDI compatible adapter product FTDI SIGNALYZER_ST 0xbca0 FTDI compatible adapter product FTDI SPECIAL_1 0xfc70 FTDI compatible adapter product FTDI SPECIAL_3 0xfc72 FTDI compatible adapter product FTDI SPECIAL_4 0xfc73 FTDI compatible adapter product FTDI SPROG_II 0xf0c8 FTDI compatible adapter product FTDI SR_RADIO 0x9379 FTDI compatible adapter product FTDI SUUNTO_SPORTS 0xf680 FTDI compatible adapter product FTDI TAVIR_STK500 0xfa33 FTDI compatible adapter product FTDI TERATRONIK_D2XX 0xec89 FTDI compatible adapter product FTDI TERATRONIK_VCP 0xec88 FTDI compatible adapter product FTDI THORLABS 0xfaf0 FTDI compatible adapter product FTDI TNC_X 0xebe0 FTDI compatible adapter product FTDI TTUSB 0xff20 FTDI compatible adapter product FTDI USBX_707 0xf857 FTDI compatible adapter product FTDI USINT_CAT 0xb810 FTDI compatible adapter product FTDI USINT_RS232 0xb812 FTDI compatible adapter product FTDI USINT_WKEY 0xb811 FTDI compatible adapter product FTDI VARDAAN 0xf070 FTDI compatible adapter product FTDI VNHCPCUSB_D 0xfe38 FTDI compatible adapter product FTDI WESTREX_MODEL_777 0xdc00 FTDI compatible adapter product FTDI WESTREX_MODEL_8900F 0xdc01 FTDI compatible adapter product FTDI XF_547 0xfc0a FTDI compatible adapter product FTDI XF_640 0xfc0e FTDI compatible adapter product FTDI XF_642 0xfc0f FTDI compatible adapter product FTDI XM_RADIO 0x937a FTDI compatible adapter product FTDI YEI_SERVOCENTER31 0xe050 FTDI compatible adapter /* Fuji photo products */ product FUJIPHOTO MASS0100 0x0100 Mass Storage /* Fujitsu products */ product FUJITSU AH_F401U 0x105b AH-F401U Air H device /* Fujitsu-Siemens products */ product FUJITSUSIEMENS SCR 0x0009 Fujitsu-Siemens SCR USB Reader /* Garmin products */ product GARMIN IQUE_3600 0x0004 iQue 3600 /* Gemalto products */ product GEMALTO PROXPU 0x5501 Prox-PU/CU RFID Card Reader /* General Instruments (Motorola) products */ product GENERALINSTMNTS SB5100 0x5100 SURFboard SB5100 Cable modem /* Genesys Logic products */ product GENESYS GL620USB 0x0501 GL620USB Host-Host interface product GENESYS GL650 0x0604 GL650 HUB product GENESYS GL606 0x0606 USB 2.0 HUB product GENESYS GL641USB 0x0700 GL641USB CompactFlash Card Reader product GENESYS GL641USB2IDE_2 0x0701 GL641USB USB-IDE Bridge No 2 product GENESYS GL641USB2IDE 0x0702 GL641USB USB-IDE Bridge product GENESYS GL641USB_2 0x0760 GL641USB 6-in-1 Card Reader /* GIGABYTE products */ product GIGABYTE GN54G 0x8001 GN-54G product GIGABYTE GNBR402W 0x8002 GN-BR402W product GIGABYTE 
GNWLBM101 0x8003 GN-WLBM101 product GIGABYTE GNWBKG 0x8007 GN-WBKG product GIGABYTE GNWB01GS 0x8008 GN-WB01GS product GIGABYTE GNWI05GS 0x800a GN-WI05GS /* Gigaset products */ product GIGASET WLAN 0x0701 WLAN product GIGASET SMCWUSBTG 0x0710 SMCWUSBT-G product GIGASET SMCWUSBTG_NF 0x0711 SMCWUSBT-G (no firmware) product GIGASET AR5523 0x0712 AR5523 product GIGASET AR5523_NF 0x0713 AR5523 (no firmware) product GIGASET RT2573 0x0722 RT2573 product GIGASET RT3070_1 0x0740 RT3070 product GIGASET RT3070_2 0x0744 RT3070 product GIGABYTE RT2870_1 0x800b RT2870 product GIGABYTE GNWB31N 0x800c GN-WB31N product GIGABYTE GNWB32L 0x800d GN-WB32L /* Global Sun Technology product */ product GLOBALSUN AR5523_1 0x7801 AR5523 product GLOBALSUN AR5523_1_NF 0x7802 AR5523 (no firmware) product GLOBALSUN AR5523_2 0x7811 AR5523 product GLOBALSUN AR5523_2_NF 0x7812 AR5523 (no firmware) /* Globespan products */ product GLOBESPAN PRISM_GT_1 0x2000 PrismGT USB 2.0 WLAN product GLOBESPAN PRISM_GT_2 0x2002 PrismGT USB 2.0 WLAN /* G.Mate, Inc products */ product GMATE YP3X00 0x1001 YP3X00 PDA /* GN Otometrics */ product GNOTOMETRICS USB 0x0010 FTDI compatible adapter /* GoHubs products */ product GOHUBS GOCOM232 0x1001 GoCOM232 Serial /* Good Way Technology products */ product GOODWAY GWUSB2E 0x6200 GWUSB2E product GOODWAY RT2573 0xc019 RT2573 /* Google products */ product GOOGLE NEXUSONE 0x4e11 Nexus One /* Gravis products */ product GRAVIS GAMEPADPRO 0x4001 GamePad Pro /* GREENHOUSE products */ product GREENHOUSE KANA21 0x0001 CF-writer with MP3 /* Griffin Technology */ product GRIFFIN IMATE 0x0405 iMate, ADB Adapter /* Guillemot Corporation */ product GUILLEMOT DALEADER 0xa300 DA Leader product GUILLEMOT HWGUSB254 0xe000 HWGUSB2-54 WLAN product GUILLEMOT HWGUSB254LB 0xe010 HWGUSB2-54-LB product GUILLEMOT HWGUSB254V2AP 0xe020 HWGUSB2-54V2-AP product GUILLEMOT HWNU300 0xe030 HWNU-300 product GUILLEMOT HWNUM300 0xe031 HWNUm-300 product GUILLEMOT HWGUN54 0xe032 HWGUn-54 product GUILLEMOT HWNUP150 0xe033 HWNUP-150 /* Hagiwara products */ product HAGIWARA FGSM 0x0002 FlashGate SmartMedia Card Reader product HAGIWARA FGCF 0x0003 FlashGate CompactFlash Card Reader product HAGIWARA FG 0x0005 FlashGate /* HAL Corporation products */ product HAL IMR001 0x0011 Crossam2+USB IR commander /* Handspring, Inc. */ product HANDSPRING VISOR 0x0100 Handspring Visor product HANDSPRING TREO 0x0200 Handspring Treo product HANDSPRING TREO600 0x0300 Handspring Treo 600 /* Hauppauge Computer Works */ product HAUPPAUGE WINTV_USB_FM 0x4d12 WinTV USB FM product HAUPPAUGE2 NOVAT500 0x9580 NovaT 500Stick /* Hawking Technologies products */ product HAWKING RT2870_1 0x0001 RT2870 product HAWKING RT2870_2 0x0003 RT2870 product HAWKING HWUN2 0x0009 HWUN2 product HAWKING RT3070 0x000b RT3070 product HAWKING RTL8192CU 0x0019 RTL8192CU product HAWKING UF100 0x400c 10/100 USB Ethernet product HAWKING RTL8192SU_1 0x0015 RTL8192SU product HAWKING RTL8192SU_2 0x0016 RTL8192SU /* HID Global GmbH products */ product HIDGLOBAL CM2020 0x0596 Omnikey Cardman 2020 product HIDGLOBAL CM6020 0x1784 Omnikey Cardman 6020 /* Hitachi, Ltd. 
products */ product HITACHI DVDCAM_DZ_MV100A 0x0004 DVD-CAM DZ-MV100A Camcorder product HITACHI DVDCAM_USB 0x001e DVDCAM USB HS Interface /* HP products */ product HP 895C 0x0004 DeskJet 895C product HP 4100C 0x0101 Scanjet 4100C product HP S20 0x0102 Photosmart S20 product HP 880C 0x0104 DeskJet 880C product HP 4200C 0x0105 ScanJet 4200C product HP CDWRITERPLUS 0x0107 CD-Writer Plus product HP KBDHUB 0x010c Multimedia Keyboard Hub product HP G55XI 0x0111 OfficeJet G55xi product HP HN210W 0x011c HN210W 802.11b WLAN product HP 49GPLUS 0x0121 49g+ graphing calculator product HP 6200C 0x0201 ScanJet 6200C product HP S20b 0x0202 PhotoSmart S20 product HP 815C 0x0204 DeskJet 815C product HP 3300C 0x0205 ScanJet 3300C product HP CDW8200 0x0207 CD-Writer Plus 8200e product HP MMKEYB 0x020c Multimedia keyboard product HP 1220C 0x0212 DeskJet 1220C product HP UN2420_QDL 0x241d UN2420 QDL Firmware Loader product HP UN2420 0x251d UN2420 WWAN/GPS Module product HP 810C 0x0304 DeskJet 810C/812C product HP 4300C 0x0305 Scanjet 4300C product HP CDW4E 0x0307 CD-Writer+ CD-4e product HP G85XI 0x0311 OfficeJet G85xi product HP 1200 0x0317 LaserJet 1200 product HP 5200C 0x0401 Scanjet 5200C product HP 830C 0x0404 DeskJet 830C product HP 3400CSE 0x0405 ScanJet 3400cse product HP 6300C 0x0601 Scanjet 6300C product HP 840C 0x0604 DeskJet 840c product HP 2200C 0x0605 ScanJet 2200C product HP 5300C 0x0701 Scanjet 5300C product HP 4400C 0x0705 Scanjet 4400C product HP 4470C 0x0805 Scanjet 4470C product HP 82x0C 0x0b01 Scanjet 82x0C product HP 2300D 0x0b17 Laserjet 2300d product HP 970CSE 0x1004 Deskjet 970Cse product HP 5400C 0x1005 Scanjet 5400C product HP 2215 0x1016 iPAQ 22xx/Jornada 548 product HP 568J 0x1116 Jornada 568 product HP 930C 0x1204 DeskJet 930c product HP3 RTL8188CU 0x1629 RTL8188CU product HP P2000U 0x1801 Inkjet P-2000U product HP HS2300 0x1e1d HS2300 HSDPA (aka MC8775) product HP 640C 0x2004 DeskJet 640c product HP 4670V 0x3005 ScanJet 4670v product HP P1100 0x3102 Photosmart P1100 product HP LD220 0x3524 LD220 POS Display product HP OJ4215 0x3d11 OfficeJet 4215 product HP HN210E 0x811c Ethernet HN210E product HP2 C500 0x6002 PhotoSmart C500 product HP EV2200 0x1b1d ev2200 HSDPA (aka MC5720) product HP HS2300 0x1e1d hs2300 HSDPA (aka MC8775) /* HTC products */ product HTC WINMOBILE 0x00ce HTC USB Sync product HTC PPC6700MODEM 0x00cf PPC6700 Modem product HTC SMARTPHONE 0x0a51 SmartPhone USB Sync product HTC WIZARD 0x0bce HTC Wizard USB Sync product HTC LEGENDSYNC 0x0c97 HTC Legend USB Sync product HTC LEGEND 0x0ff9 HTC Legend product HTC LEGENDINTERNET 0x0ffe HTC Legend Internet Sharing /* HUAWEI products */ product HUAWEI MOBILE 0x1001 Huawei Mobile product HUAWEI E220 0x1003 HSDPA modem product HUAWEI E220BIS 0x1004 HSDPA modem product HUAWEI E1401 0x1401 3G modem product HUAWEI E1402 0x1402 3G modem product HUAWEI E1403 0x1403 3G modem product HUAWEI E1404 0x1404 3G modem product HUAWEI E1405 0x1405 3G modem product HUAWEI E1406 0x1406 3G modem product HUAWEI E1407 0x1407 3G modem product HUAWEI E1408 0x1408 3G modem product HUAWEI E1409 0x1409 3G modem product HUAWEI E140A 0x140a 3G modem product HUAWEI E140B 0x140b 3G modem product HUAWEI E180V 0x140c E180V product HUAWEI E140D 0x140d 3G modem product HUAWEI E140E 0x140e 3G modem product HUAWEI E140F 0x140f 3G modem product HUAWEI E1410 0x1410 3G modem product HUAWEI E1411 0x1411 3G modem product HUAWEI E1412 0x1412 3G modem product HUAWEI E1413 0x1413 3G modem product HUAWEI E1414 0x1414 3G modem product HUAWEI E1415 0x1415 3G modem 
product HUAWEI E1416 0x1416 3G modem product HUAWEI E1417 0x1417 3G modem product HUAWEI E1418 0x1418 3G modem product HUAWEI E1419 0x1419 3G modem product HUAWEI E141A 0x141a 3G modem product HUAWEI E141B 0x141b 3G modem product HUAWEI E141C 0x141c 3G modem product HUAWEI E141D 0x141d 3G modem product HUAWEI E141E 0x141e 3G modem product HUAWEI E141F 0x141f 3G modem product HUAWEI E1420 0x1420 3G modem product HUAWEI E1421 0x1421 3G modem product HUAWEI E1422 0x1422 3G modem product HUAWEI E1423 0x1423 3G modem product HUAWEI E1424 0x1424 3G modem product HUAWEI E1425 0x1425 3G modem product HUAWEI E1426 0x1426 3G modem product HUAWEI E1427 0x1427 3G modem product HUAWEI E1428 0x1428 3G modem product HUAWEI E1429 0x1429 3G modem product HUAWEI E142A 0x142a 3G modem product HUAWEI E142B 0x142b 3G modem product HUAWEI E142C 0x142c 3G modem product HUAWEI E142D 0x142d 3G modem product HUAWEI E142E 0x142e 3G modem product HUAWEI E142F 0x142f 3G modem product HUAWEI E1430 0x1430 3G modem product HUAWEI E1431 0x1431 3G modem product HUAWEI E1432 0x1432 3G modem product HUAWEI E1433 0x1433 3G modem product HUAWEI E1434 0x1434 3G modem product HUAWEI E1435 0x1435 3G modem product HUAWEI E1436 0x1436 3G modem product HUAWEI E1437 0x1437 3G modem product HUAWEI E1438 0x1438 3G modem product HUAWEI E1439 0x1439 3G modem product HUAWEI E143A 0x143a 3G modem product HUAWEI E143B 0x143b 3G modem product HUAWEI E143C 0x143c 3G modem product HUAWEI E143D 0x143d 3G modem product HUAWEI E143E 0x143e 3G modem product HUAWEI E143F 0x143f 3G modem product HUAWEI E1752 0x1446 3G modem product HUAWEI K4505 0x1464 3G modem product HUAWEI K3765 0x1465 3G modem product HUAWEI E1820 0x14ac E1820 HSPA+ USB Slider product HUAWEI K3770 0x14c9 3G modem product HUAWEI K3772 0x14cf K3772 product HUAWEI K3770_INIT 0x14d1 K3770 Initial product HUAWEI E3131_INIT 0x14fe 3G modem initial product HUAWEI E392 0x1505 LTE modem product HUAWEI E3131 0x1506 3G modem product HUAWEI K3765_INIT 0x1520 K3765 Initial product HUAWEI K4505_INIT 0x1521 K4505 Initial product HUAWEI K3772_INIT 0x1526 K3772 Initial product HUAWEI E3272_INIT 0x155b LTE modem initial product HUAWEI ME909U 0x1573 LTE modem product HUAWEI R215_INIT 0x1582 LTE modem initial product HUAWEI R215 0x1588 LTE modem product HUAWEI ETS2055 0x1803 CDMA modem product HUAWEI E173 0x1c05 3G modem product HUAWEI E173_INIT 0x1c0b 3G modem initial product HUAWEI E3272 0x1c1e LTE modem /* HUAWEI 3com products */ product HUAWEI3COM WUB320G 0x0009 Aolynk WUB320g /* IBM Corporation */ product IBM USBCDROMDRIVE 0x4427 USB CD-ROM Drive /* Icom products */ product ICOM SP1 0x0004 FTDI compatible adapter product ICOM OPC_U_UC 0x0018 FTDI compatible adapter product ICOM RP2C1 0x0009 FTDI compatible adapter product ICOM RP2C2 0x000a FTDI compatible adapter product ICOM RP2D 0x000b FTDI compatible adapter product ICOM RP2KVR 0x0013 FTDI compatible adapter product ICOM RP2KVT 0x0012 FTDI compatible adapter product ICOM RP2VR 0x000d FTDI compatible adapter product ICOM RP2VT 0x000c FTDI compatible adapter product ICOM RP4KVR 0x0011 FTDI compatible adapter product ICOM RP4KVT 0x0010 FTDI compatible adapter /* ID-tech products */ product IDTECH IDT1221U 0x0300 FTDI compatible adapter /* Imagination Technologies products */ product IMAGINATION DBX1 0x2107 DBX1 DSP core /* Initio Corporation products */ product INITIO DUMMY 0x0000 Dummy product product INITIO INIC_1610P 0x1e40 USB to SATA Bridge /* Inside Out Networks products */ product INSIDEOUT EDGEPORT4 0x0001 EdgePort/4 serial ports /* 
In-System products */ product INSYSTEM F5U002 0x0002 Parallel printer product INSYSTEM ATAPI 0x0031 ATAPI Adapter product INSYSTEM ISD110 0x0200 IDE Adapter ISD110 product INSYSTEM ISD105 0x0202 IDE Adapter ISD105 product INSYSTEM USBCABLE 0x081a USB cable product INSYSTEM STORAGE_V2 0x5701 USB Storage Adapter V2 /* Intel products */ product INTEL EASYPC_CAMERA 0x0110 Easy PC Camera product INTEL TESTBOARD 0x9890 82930 test board product INTEL2 IRMH 0x0020 Integrated Rate Matching Hub product INTEL2 IRMH2 0x0024 Integrated Rate Matching Hub product INTEL2 IRMH3 0x8000 Integrated Rate Matching Hub product INTEL2 IRMH4 0x8008 Integrated Rate Matching Hub /* Interbiometric products */ product INTERBIOMETRICS IOBOARD 0x1002 FTDI compatible adapter product INTERBIOMETRICS MINI_IOBOARD 0x1006 FTDI compatible adapter /* Intersil products */ product INTERSIL PRISM_GT 0x1000 PrismGT USB 2.0 WLAN product INTERSIL PRISM_2X 0x3642 Prism2.x or Atmel WLAN /* Intrepid Control Systems products */ product INTREPIDCS VALUECAN 0x0601 ValueCAN CAN bus interface product INTREPIDCS NEOVI 0x0701 NeoVI Blue vehicle bus interface /* I/O DATA products */ product IODATA IU_CD2 0x0204 DVD Multi-plus unit iU-CD2 product IODATA DVR_UEH8 0x0206 DVD Multi-plus unit DVR-UEH8 product IODATA USBSSMRW 0x0314 USB-SSMRW SD-card product IODATA USBSDRW 0x031e USB-SDRW SD-card product IODATA USBETT 0x0901 USB ETT product IODATA USBETTX 0x0904 USB ETTX product IODATA USBETTXS 0x0913 USB ETTX product IODATA USBWNB11A 0x0919 USB WN-B11 product IODATA USBWNB11 0x0922 USB Airport WN-B11 product IODATA ETGUS2 0x0930 ETG-US2 product IODATA WNGDNUS2 0x093f WN-GDN/US2 product IODATA RT3072_1 0x0944 RT3072 product IODATA RT3072_2 0x0945 RT3072 product IODATA RT3072_3 0x0947 RT3072 product IODATA RT3072_4 0x0948 RT3072 product IODATA USBRSAQ 0x0a03 Serial USB-RSAQ1 product IODATA USBRSAQ5 0x0a0e Serial USB-RSAQ5 product IODATA2 USB2SC 0x0a09 USB2.0-SCSI Bridge USB2-SC /* Iomega products */ product IOMEGA ZIP100 0x0001 Zip 100 product IOMEGA ZIP250 0x0030 Zip 250 /* Ionic products */ product IONICS PLUGCOMPUTER 0x0102 FTDI compatible adapter /* Integrated System Solution Corp. products */ product ISSC ISSCBTA 0x1001 Bluetooth USB Adapter /* iTegno products */ product ITEGNO WM1080A 0x1080 WM1080A GSM/GPRS modem product ITEGNO WM2080A 0x2080 WM2080A CDMA modem /* Ituner networks products */ product ITUNERNET USBLCD2X20 0x0002 USB-LCD 2x20 product ITUNERNET USBLCD4X20 0xc001 USB-LCD 4x20 /* Jablotron products */ product JABLOTRON PC60B 0x0001 PC-60B /* Jaton products */ product JATON EDA 0x5704 Ethernet /* Jeti products */ product JETI SPC1201 0x04b2 FTDI compatible adapter /* JMicron products */ product JMICRON JM20336 0x2336 USB to SATA Bridge product JMICRON JM20337 0x2338 USB to ATA/ATAPI Bridge /* JVC products */ product JVC GR_DX95 0x000a GR-DX95 product JVC MP_PRX1 0x3008 MP-PRX1 Ethernet /* JRC products */ product JRC AH_J3001V_J3002V 0x0001 AirH PHONE AH-J3001V/J3002V /* Kamstrup products */ product KAMSTRUP OPTICALEYE 0x0001 Optical Eye/3-wire product KAMSTRUP MBUS_250D 0x0005 M-Bus Master MultiPort 250D /* Kawatsu products */ product KAWATSU MH4000P 0x0003 MiniHub 4000P /* Keisokugiken Corp. 
products */ product KEISOKUGIKEN USBDAQ 0x0068 HKS-0200 USBDAQ /* Kensington products */ product KENSINGTON ORBIT 0x1003 Orbit USB/PS2 trackball product KENSINGTON TURBOBALL 0x1005 TurboBall /* Keyspan products */ product KEYSPAN USA28_NF 0x0101 USA-28 serial Adapter (no firmware) product KEYSPAN USA28X_NF 0x0102 USA-28X serial Adapter (no firmware) product KEYSPAN USA19_NF 0x0103 USA-19 serial Adapter (no firmware) product KEYSPAN USA18_NF 0x0104 USA-18 serial Adapter (no firmware) product KEYSPAN USA18X_NF 0x0105 USA-18X serial Adapter (no firmware) product KEYSPAN USA19W_NF 0x0106 USA-19W serial Adapter (no firmware) product KEYSPAN USA19 0x0107 USA-19 serial Adapter product KEYSPAN USA19W 0x0108 USA-19W serial Adapter product KEYSPAN USA49W_NF 0x0109 USA-49W serial Adapter (no firmware) product KEYSPAN USA49W 0x010a USA-49W serial Adapter product KEYSPAN USA19QI_NF 0x010b USA-19QI serial Adapter (no firmware) product KEYSPAN USA19QI 0x010c USA-19QI serial Adapter product KEYSPAN USA19Q_NF 0x010d USA-19Q serial Adapter (no firmware) product KEYSPAN USA19Q 0x010e USA-19Q serial Adapter product KEYSPAN USA28 0x010f USA-28 serial Adapter product KEYSPAN USA28XXB 0x0110 USA-28X/XB serial Adapter product KEYSPAN USA18 0x0111 USA-18 serial Adapter product KEYSPAN USA18X 0x0112 USA-18X serial Adapter product KEYSPAN USA28XB_NF 0x0113 USA-28XB serial Adapter (no firmware) product KEYSPAN USA28XA_NF 0x0114 USA-28XB serial Adapter (no firmware) product KEYSPAN USA28XA 0x0115 USA-28XA serial Adapter product KEYSPAN USA18XA_NF 0x0116 USA-18XA serial Adapter (no firmware) product KEYSPAN USA18XA 0x0117 USA-18XA serial Adapter product KEYSPAN USA19QW_NF 0x0118 USA-19WQ serial Adapter (no firmware) product KEYSPAN USA19QW 0x0119 USA-19WQ serial Adapter product KEYSPAN USA19HA 0x0121 USA-19HS serial Adapter product KEYSPAN UIA10 0x0201 UIA-10 remote control product KEYSPAN UIA11 0x0202 UIA-11 remote control /* Kingston products */ product KINGSTON XX1 0x0008 Ethernet product KINGSTON KNU101TX 0x000a KNU101TX USB Ethernet product KINGSTON HYPERX3_0 0x162b DT HyperX 3.0 /* Kawasaki products */ product KLSI DUH3E10BT 0x0008 USB Ethernet product KLSI DUH3E10BTN 0x0009 USB Ethernet /* Kobil products */ product KOBIL CONV_B1 0x2020 FTDI compatible adapter product KOBIL CONV_KAAN 0x2021 FTDI compatible adapter /* Kodak products */ product KODAK DC220 0x0100 Digital Science DC220 product KODAK DC260 0x0110 Digital Science DC260 product KODAK DC265 0x0111 Digital Science DC265 product KODAK DC290 0x0112 Digital Science DC290 product KODAK DC240 0x0120 Digital Science DC240 product KODAK DC280 0x0130 Digital Science DC280 /* Kontron AG products */ product KONTRON DM9601 0x8101 USB Ethernet product KONTRON JP1082 0x9700 USB Ethernet /* Konica Corp. 
Products */ product KONICA CAMERA 0x0720 Digital Color Camera /* KYE products */ product KYE NICHE 0x0001 Niche mouse product KYE NETSCROLL 0x0003 Genius NetScroll mouse product KYE FLIGHT2000 0x1004 Flight 2000 joystick product KYE VIVIDPRO 0x2001 ColorPage Vivid-Pro scanner /* Kyocera products */ product KYOCERA FINECAM_S3X 0x0100 Finecam S3x product KYOCERA FINECAM_S4 0x0101 Finecam S4 product KYOCERA FINECAM_S5 0x0103 Finecam S5 product KYOCERA FINECAM_L3 0x0105 Finecam L3 product KYOCERA AHK3001V 0x0203 AH-K3001V product KYOCERA2 CDMA_MSM_K 0x17da Qualcomm Kyocera CDMA Technologies MSM product KYOCERA2 KPC680 0x180a Qualcomm Kyocera CDMA Technologies MSM /* LaCie products */ product LACIE HD 0xa601 Hard Disk product LACIE CDRW 0xa602 CD R/W /* Lake Shore Cryotronics products */ product LAKESHORE 121 0x0100 121 Current Source product LAKESHORE 218A 0x0200 218A Temperature Monitor product LAKESHORE 219 0x0201 219 Temperature Monitor product LAKESHORE 233 0x0202 233 Temperature Transmitter product LAKESHORE 235 0x0203 235 Temperature Transmitter product LAKESHORE 335 0x0300 335 Temperature Controller product LAKESHORE 336 0x0301 336 Temperature Controller product LAKESHORE 350 0x0302 350 Temperature Controller product LAKESHORE 371 0x0303 371 AC Bridge product LAKESHORE 411 0x0400 411 Handheld Gaussmeter product LAKESHORE 425 0x0401 425 Gaussmeter product LAKESHORE 455A 0x0402 455A DSP Gaussmeter product LAKESHORE 475A 0x0403 475A DSP Gaussmeter product LAKESHORE 465 0x0404 465 Gaussmeter product LAKESHORE 625A 0x0600 625A Magnet PSU product LAKESHORE 642A 0x0601 642A Magnet PSU product LAKESHORE 648 0x0602 648 Magnet PSU product LAKESHORE 737 0x0700 737 VSM Controller product LAKESHORE 776 0x0701 776 Matrix Switch /* Larsen and Brusgaard products */ product LARSENBRUSGAARD ALTITRACK 0x0001 FTDI compatible adapter /* Leadtek products */ product LEADTEK 9531 0x2101 9531 GPS /* Lenovo products */ product LENOVO GIGALAN 0x304b USB 3.0 Ethernet product LENOVO ETHERNET 0x7203 USB 2.0 Ethernet /* Lexar products */ product LEXAR JUMPSHOT 0x0001 jumpSHOT CompactFlash Reader product LEXAR CF_READER 0xb002 USB CF Reader product LEXAR JUMPDRIVE 0xa833 USB Jumpdrive Flash Drive /* Lexmark products */ product LEXMARK S2450 0x0009 Optra S 2450 /* Liebert products */ product LIEBERT POWERSURE_PXT 0xffff PowerSure Personal XT product LIEBERT2 PSI1000 0x0004 UPS PSI 1000 FW:08 /* Link Instruments Inc. 
products */ product LINKINSTRUMENTS MSO19 0xf190 Link Instruments MSO-19 product LINKINSTRUMENTS MSO28 0xf280 Link Instruments MSO-28 product LINKINSTRUMENTS MSO28_2 0xf281 Link Instruments MSO-28 /* Linksys products */ product LINKSYS MAUSB2 0x0105 Camedia MAUSB-2 product LINKSYS USB10TX1 0x200c USB10TX product LINKSYS USB10T 0x2202 USB10T Ethernet product LINKSYS USB100TX 0x2203 USB100TX Ethernet product LINKSYS USB100H1 0x2204 USB100H1 Ethernet/HPNA product LINKSYS USB10TA 0x2206 USB10TA Ethernet product LINKSYS USB10TX2 0x400b USB10TX product LINKSYS2 WUSB11 0x2219 WUSB11 Wireless Adapter product LINKSYS2 USB200M 0x2226 USB 2.0 10/100 Ethernet product LINKSYS3 WUSB11v28 0x2233 WUSB11 v2.8 Wireless Adapter product LINKSYS4 USB1000 0x0039 USB1000 product LINKSYS4 WUSB100 0x0070 WUSB100 product LINKSYS4 WUSB600N 0x0071 WUSB600N product LINKSYS4 WUSB54GCV2 0x0073 WUSB54GC v2 product LINKSYS4 WUSB54GCV3 0x0077 WUSB54GC v3 product LINKSYS4 RT3070 0x0078 RT3070 product LINKSYS4 WUSB600NV2 0x0079 WUSB600N v2 /* Logilink products */ product LOGILINK DUMMY 0x0000 Dummy product product LOGILINK U2M 0x0101 LogiLink USB MIDI Cable /* Logitech products */ product LOGITECH M2452 0x0203 M2452 keyboard product LOGITECH M4848 0x0301 M4848 mouse product LOGITECH PAGESCAN 0x040f PageScan product LOGITECH QUICKCAMWEB 0x0801 QuickCam Web product LOGITECH QUICKCAMPRO 0x0810 QuickCam Pro product LOGITECH WEBCAMC100 0x0817 Webcam C100 product LOGITECH QUICKCAMEXP 0x0840 QuickCam Express product LOGITECH QUICKCAM 0x0850 QuickCam product LOGITECH QUICKCAMPRO3 0x0990 QuickCam Pro 9000 product LOGITECH N43 0xc000 N43 product LOGITECH N48 0xc001 N48 mouse product LOGITECH MBA47 0xc002 M-BA47 mouse product LOGITECH WMMOUSE 0xc004 WingMan Gaming Mouse product LOGITECH BD58 0xc00c BD58 mouse product LOGITECH UN58A 0xc030 iFeel Mouse product LOGITECH UN53B 0xc032 iFeel MouseMan product LOGITECH WMPAD 0xc208 WingMan GamePad Extreme product LOGITECH WMRPAD 0xc20a WingMan RumblePad product LOGITECH WMJOY 0xc281 WingMan Force joystick product LOGITECH BB13 0xc401 USB-PS/2 Trackball product LOGITECH RK53 0xc501 Cordless mouse product LOGITECH RB6 0xc503 Cordless keyboard product LOGITECH MX700 0xc506 Cordless optical mouse product LOGITECH QUICKCAMPRO2 0xd001 QuickCam Pro /* Logitec Corp. products */ product LOGITEC LDR_H443SU2 0x0033 DVD Multi-plus unit LDR-H443SU2 product LOGITEC LDR_H443U2 0x00b3 DVD Multi-plus unit LDR-H443U2 product LOGITEC LAN_GTJU2A 0x0160 LAN-GTJ/U2A Ethernet product LOGITEC RT2870_1 0x0162 RT2870 product LOGITEC RT2870_2 0x0163 RT2870 product LOGITEC RT2870_3 0x0164 RT2870 product LOGITEC LANW300NU2 0x0166 LAN-W300N/U2 product LOGITEC LANW150NU2 0x0168 LAN-W150N/U2 product LOGITEC LANW300NU2S 0x0169 LAN-W300N/U2S /* Longcheer Holdings, Ltd. products */ product LONGCHEER WM66 0x6061 Longcheer WM66 HSDPA product LONGCHEER W14 0x9603 Mobilcom W14 product LONGCHEER DISK 0xf000 Driver disk product LONGCHEER XSSTICK 0x9605 4G Systems XSStick P14 /* Lucent products */ product LUCENT EVALKIT 0x1001 USS-720 evaluation kit /* Luwen products */ product LUWEN EASYDISK 0x0005 EasyDisc /* Macally products */ product MACALLY MOUSE1 0x0101 mouse /* Mag-Tek products */ product MAGTEK USBSWIPE 0x0002 USB Mag Stripe Swipe Reader /* Marvell Technology Group, Ltd.
products */ product MARVELL SHEEVAPLUG 0x9e8f SheevaPlug serial interface /* Matrix Orbital products */ product MATRIXORBITAL FTDI_RANGE_0100 0x0100 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0101 0x0101 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0102 0x0102 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0103 0x0103 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0104 0x0104 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0105 0x0105 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0106 0x0106 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0107 0x0107 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0108 0x0108 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0109 0x0109 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_010A 0x010a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_010B 0x010b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_010C 0x010c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_010D 0x010d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_010E 0x010e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_010F 0x010f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0110 0x0110 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0111 0x0111 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0112 0x0112 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0113 0x0113 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0114 0x0114 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0115 0x0115 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0116 0x0116 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0117 0x0117 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0118 0x0118 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0119 0x0119 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_011A 0x011a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_011B 0x011b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_011C 0x011c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_011D 0x011d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_011E 0x011e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_011F 0x011f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0120 0x0120 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0121 0x0121 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0122 0x0122 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0123 0x0123 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0124 0x0124 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0125 0x0125 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0126 0x0126 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0128 0x0128 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0129 0x0129 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_012A 0x012a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_012B 0x012b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_012D 0x012d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_012E 0x012e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_012F 0x012f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0130 0x0130 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0131 0x0131 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0132 0x0132 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0133 0x0133 FTDI compatible adapter 
product MATRIXORBITAL FTDI_RANGE_0134 0x0134 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0135 0x0135 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0136 0x0136 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0137 0x0137 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0138 0x0138 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0139 0x0139 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_013A 0x013a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_013B 0x013b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_013C 0x013c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_013D 0x013d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_013E 0x013e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_013F 0x013f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0140 0x0140 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0141 0x0141 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0142 0x0142 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0143 0x0143 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0144 0x0144 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0145 0x0145 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0146 0x0146 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0147 0x0147 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0148 0x0148 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0149 0x0149 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_014A 0x014a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_014B 0x014b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_014C 0x014c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_014D 0x014d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_014E 0x014e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_014F 0x014f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0150 0x0150 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0151 0x0151 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0152 0x0152 FTDI compatible adapter product MATRIXORBITAL MOUA 0x0153 Matrix Orbital MOU-Axxxx LCD displays product MATRIXORBITAL FTDI_RANGE_0159 0x0159 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_015A 0x015a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_015B 0x015b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_015C 0x015c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_015D 0x015d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_015E 0x015e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_015F 0x015f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0160 0x0160 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0161 0x0161 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0162 0x0162 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0163 0x0163 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0164 0x0164 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0165 0x0165 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0166 0x0166 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0167 0x0167 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0168 0x0168 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0169 0x0169 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_016A 0x016a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_016B 0x016b FTDI compatible adapter product MATRIXORBITAL
FTDI_RANGE_016C 0x016c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_016D 0x016d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_016E 0x016e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_016F 0x016f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0170 0x0170 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0171 0x0171 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0172 0x0172 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0173 0x0173 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0174 0x0174 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0175 0x0175 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0176 0x0176 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0177 0x0177 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0178 0x0178 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0179 0x0179 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_017A 0x017a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_017B 0x017b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_017C 0x017c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_017D 0x017d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_017E 0x017e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_017F 0x017f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0180 0x0180 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0181 0x0181 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0182 0x0182 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0183 0x0183 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0184 0x0184 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0185 0x0185 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0186 0x0186 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0187 0x0187 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0188 0x0188 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0189 0x0189 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_018A 0x018a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_018B 0x018b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_018C 0x018c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_018D 0x018d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_018E 0x018e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_018F 0x018f FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0190 0x0190 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0191 0x0191 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0192 0x0192 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0193 0x0193 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0194 0x0194 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0195 0x0195 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0196 0x0196 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0197 0x0197 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0198 0x0198 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_0199 0x0199 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_019A 0x019a FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_019B 0x019b FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_019C 0x019c FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_019D 0x019d FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_019E 0x019e FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_019F 0x019f FTDI 
compatible adapter product MATRIXORBITAL FTDI_RANGE_01A0 0x01a0 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A1 0x01a1 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A2 0x01a2 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A3 0x01a3 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A4 0x01a4 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A5 0x01a5 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A6 0x01a6 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A7 0x01a7 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A8 0x01a8 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01A9 0x01a9 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01AA 0x01aa FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01AB 0x01ab FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01AC 0x01ac FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01AD 0x01ad FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01AE 0x01ae FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01AF 0x01af FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B0 0x01b0 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B1 0x01b1 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B2 0x01b2 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B3 0x01b3 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B4 0x01b4 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B5 0x01b5 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B6 0x01b6 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B7 0x01b7 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B8 0x01b8 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01B9 0x01b9 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01BA 0x01ba FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01BB 0x01bb FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01BC 0x01bc FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01BD 0x01bd FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01BE 0x01be FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01BF 0x01bf FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C0 0x01c0 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C1 0x01c1 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C2 0x01c2 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C3 0x01c3 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C4 0x01c4 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C5 0x01c5 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C6 0x01c6 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C7 0x01c7 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C8 0x01c8 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01C9 0x01c9 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01CA 0x01ca FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01CB 0x01cb FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01CC 0x01cc FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01CD 0x01cd FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01CE 0x01ce FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01CF 0x01cf FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D0 0x01d0 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D1 0x01d1 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D2 0x01d2 FTDI compatible adapter product 
MATRIXORBITAL FTDI_RANGE_01D3 0x01d3 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D4 0x01d4 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D5 0x01d5 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D6 0x01d6 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D7 0x01d7 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D8 0x01d8 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01D9 0x01d9 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01DA 0x01da FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01DB 0x01db FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01DC 0x01dc FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01DD 0x01dd FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01DE 0x01de FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01DF 0x01df FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E0 0x01e0 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E1 0x01e1 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E2 0x01e2 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E3 0x01e3 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E4 0x01e4 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E5 0x01e5 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E6 0x01e6 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E7 0x01e7 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E8 0x01e8 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01E9 0x01e9 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01EA 0x01ea FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01EB 0x01eb FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01EC 0x01ec FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01ED 0x01ed FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01EE 0x01ee FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01EF 0x01ef FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F0 0x01f0 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F1 0x01f1 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F2 0x01f2 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F3 0x01f3 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F4 0x01f4 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F5 0x01f5 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F6 0x01f6 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F7 0x01f7 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F8 0x01f8 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01F9 0x01f9 FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01FA 0x01fa FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01FB 0x01fb FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01FC 0x01fc FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01FD 0x01fd FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01FE 0x01fe FTDI compatible adapter product MATRIXORBITAL FTDI_RANGE_01FF 0x01ff FTDI compatible adapter /* MCT Corp. */ product MCT HUB0100 0x0100 Hub product MCT DU_H3SP_USB232 0x0200 D-Link DU-H3SP USB BAY Hub product MCT USB232 0x0210 USB-232 Interface product MCT SITECOM_USB232 0x0230 Sitecom USB-232 Products /* Medeli */ product MEDELI DD305 0x5011 DD305 Digital Drum Set /* MediaTek, Inc. 
*/ product MEDIATEK MTK3329 0x3329 MTK II GPS Receiver /* Meizu Electronics */ product MEIZU M6_SL 0x0140 MiniPlayer M6 (SL) /* Melco, Inc. products */ product MELCO LUATX1 0x0001 LUA-TX Ethernet product MELCO LUATX5 0x0005 LUA-TX Ethernet product MELCO LUA2TX5 0x0009 LUA2-TX Ethernet product MELCO LUAKTX 0x0012 LUA-KTX Ethernet product MELCO DUBPXXG 0x001c DUB-PxxG product MELCO LUAU2KTX 0x003d LUA-U2-KTX Ethernet product MELCO KG54YB 0x005e WLI-U2-KG54-YB WLAN product MELCO KG54 0x0066 WLI-U2-KG54 WLAN product MELCO KG54AI 0x0067 WLI-U2-KG54-AI WLAN product MELCO LUA3U2AGT 0x006e LUA3-U2-AGT product MELCO NINWIFI 0x008b Nintendo Wi-Fi product MELCO PCOPRS1 0x00b3 PC-OP-RS1 RemoteStation product MELCO SG54HP 0x00d8 WLI-U2-SG54HP product MELCO G54HP 0x00d9 WLI-U2-G54HP product MELCO KG54L 0x00da WLI-U2-KG54L product MELCO WLIUCG300N 0x00e8 WLI-UC-G300N product MELCO SG54HG 0x00f4 WLI-U2-SG54HG product MELCO WLRUCG 0x0116 WLR-UC-G product MELCO WLRUCGAOSS 0x0119 WLR-UC-G-AOSS product MELCO WLIUCAG300N 0x012e WLI-UC-AG300N product MELCO WLIUCG 0x0137 WLI-UC-G product MELCO WLIUCG300HP 0x0148 WLI-UC-G300HP product MELCO RT2870_2 0x0150 RT2870 product MELCO WLIUCGN 0x015d WLI-UC-GN product MELCO WLIUCG301N 0x016f WLI-UC-G301N product MELCO WLIUCGNM 0x01a2 WLI-UC-GNM product MELCO WLIUCG300HPV1 0x01a8 WLI-UC-G300HP-V1 product MELCO WLIUCGNM2 0x01ee WLI-UC-GNM2 /* Merlin products */ product MERLIN V620 0x1110 Merlin V620 /* MetaGeek products */ product METAGEEK TELLSTICK 0x0c30 FTDI compatible adapter product METAGEEK WISPY1B 0x083e MetaGeek Wi-Spy product METAGEEK WISPY24X 0x083f MetaGeek Wi-Spy 2.4x product METAGEEK2 WISPYDBX 0x5000 MetaGeek Wi-Spy DBx /* Metricom products */ product METRICOM RICOCHET_GS 0x0001 Ricochet GS /* MGE UPS Systems */ product MGE UPS1 0x0001 MGE UPS SYSTEMS PROTECTIONCENTER 1 product MGE UPS2 0xffff MGE UPS SYSTEMS PROTECTIONCENTER 2 /* MEI products */ product MEI CASHFLOW_SC 0x1100 Cashflow-SC Cash Acceptor product MEI S2000 0x1101 Series 2000 Combo Acceptor /* Microdia / Sonix Technology Co., Ltd.
products */ product CHICONY2 YUREX 0x1010 YUREX product CHICONY2 CAM_1 0x62c0 CAM_1 product CHICONY2 TEMPER 0x7401 TEMPer sensor /* Micro Star International products */ product MSI BT_DONGLE 0x1967 Bluetooth USB dongle product MSI RT3070_1 0x3820 RT3070 product MSI RT3070_2 0x3821 RT3070 product MSI RT3070_8 0x3822 RT3070 product MSI RT3070_3 0x3870 RT3070 product MSI RT3070_9 0x3871 RT3070 product MSI UB11B 0x6823 UB11B product MSI RT2570 0x6861 RT2570 product MSI RT2570_2 0x6865 RT2570 product MSI RT2570_3 0x6869 RT2570 product MSI RT2573_1 0x6874 RT2573 product MSI RT2573_2 0x6877 RT2573 product MSI RT3070_4 0x6899 RT3070 product MSI RT3070_5 0x821a RT3070 product MSI RT3070_10 0x822a RT3070 product MSI RT3070_6 0x870a RT3070 product MSI RT3070_11 0x871a RT3070 product MSI RT3070_7 0x899a RT3070 product MSI RT2573_3 0xa861 RT2573 product MSI RT2573_4 0xa874 RT2573 /* Micron products */ product MICRON REALSSD 0x0655 Real SSD eUSB /* Microsoft products */ product MICROSOFT SIDEPREC 0x0008 SideWinder Precision Pro product MICROSOFT INTELLIMOUSE 0x0009 IntelliMouse product MICROSOFT NATURALKBD 0x000b Natural Keyboard Elite product MICROSOFT DDS80 0x0014 Digital Sound System 80 product MICROSOFT SIDEWINDER 0x001a Sidewinder Precision Racing Wheel product MICROSOFT INETPRO 0x001c Internet Keyboard Pro product MICROSOFT TBEXPLORER 0x0024 Trackball Explorer product MICROSOFT INTELLIEYE 0x0025 IntelliEye mouse product MICROSOFT INETPRO2 0x002b Internet Keyboard Pro product MICROSOFT INTELLIMOUSE5 0x0039 IntelliMouse 1.1 5-Button Mouse product MICROSOFT WHEELMOUSE 0x0040 Wheel Mouse Optical product MICROSOFT MN510 0x006e MN510 Wireless product MICROSOFT 700WX 0x0079 Palm 700WX product MICROSOFT MN110 0x007a 10/100 USB NIC product MICROSOFT WLINTELLIMOUSE 0x008c Wireless Optical IntelliMouse product MICROSOFT WLNOTEBOOK 0x00b9 Wireless Optical Mouse (Model 1023) product MICROSOFT COMFORT3000 0x00d1 Comfort Optical Mouse 3000 (Model 1043) product MICROSOFT WLNOTEBOOK3 0x00d2 Wireless Optical Mouse 3000 (Model 1049) product MICROSOFT NATURAL4000 0x00db Natural Ergonomic Keyboard 4000 product MICROSOFT WLNOTEBOOK2 0x00e1 Wireless Optical Mouse 3000 (Model 1056) product MICROSOFT XBOX360 0x0292 XBOX 360 WLAN /* Microtech products */ product MICROTECH SCSIDB25 0x0004 USB-SCSI-DB25 product MICROTECH SCSIHD50 0x0005 USB-SCSI-HD50 product MICROTECH DPCM 0x0006 USB CameraMate product MICROTECH FREECOM 0xfc01 Freecom USB-IDE /* Microtek products */ product MICROTEK 336CX 0x0094 Phantom 336CX - C3 scanner product MICROTEK X6U 0x0099 ScanMaker X6 - X6U product MICROTEK C6 0x009a Phantom C6 scanner product MICROTEK 336CX2 0x00a0 Phantom 336CX - C3 scanner product MICROTEK V6USL 0x00a3 ScanMaker V6USL product MICROTEK V6USL2 0x80a3 ScanMaker V6USL product MICROTEK V6UL 0x80ac ScanMaker V6UL /* Microtune, Inc. products */ product MICROTUNE BT_DONGLE 0x1000 Bluetooth USB dongle /* Midiman products */ product MAUDIO MIDISPORT2X2 0x1001 Midisport 2x2 product MAUDIO FASTTRACKULTRA 0x2080 Fast Track Ultra product MAUDIO FASTTRACKULTRA8R 0x2081 Fast Track Ultra 8R /* MindsAtWork products */ product MINDSATWORK WALLET 0x0001 Digital Wallet /* Minolta Co., Ltd. 
*/ product MINOLTA 2300 0x4001 Dimage 2300 product MINOLTA S304 0x4007 Dimage S304 product MINOLTA X 0x4009 Dimage X product MINOLTA 5400 0x400e Dimage 5400 product MINOLTA F300 0x4011 Dimage F300 product MINOLTA E223 0x4017 Dimage E223 /* Mitsumi products */ product MITSUMI CDRRW 0x0000 CD-R/RW Drive product MITSUMI BT_DONGLE 0x641f Bluetooth USB dongle product MITSUMI FDD 0x6901 USB FDD /* Mobile Action products */ product MOBILEACTION MA620 0x0620 MA-620 Infrared Adapter /* Mobility products */ product MOBILITY USB_SERIAL 0x0202 FTDI compatible adapter product MOBILITY EA 0x0204 Ethernet product MOBILITY EASIDOCK 0x0304 EasiDock Ethernet /* MosChip products */ product MOSCHIP MCS7703 0x7703 MCS7703 Serial Port Adapter product MOSCHIP MCS7730 0x7730 MCS7730 Ethernet product MOSCHIP MCS7820 0x7820 MCS7820 Serial Port Adapter product MOSCHIP MCS7830 0x7830 MCS7830 Ethernet product MOSCHIP MCS7832 0x7832 MCS7832 Ethernet product MOSCHIP MCS7840 0x7840 MCS7840 Serial Port Adapter /* Motorola products */ product MOTOROLA MC141555 0x1555 MC141555 hub controller product MOTOROLA SB4100 0x4100 SB4100 USB Cable Modem product MOTOROLA2 T720C 0x2822 T720c product MOTOROLA2 A41XV32X 0x2a22 A41x/V32x Mobile Phones product MOTOROLA2 E398 0x4810 E398 Mobile Phone product MOTOROLA2 USBLAN 0x600c USBLAN product MOTOROLA2 USBLAN2 0x6027 USBLAN product MOTOROLA2 MB886 0x710f MB886 Mobile Phone (Atria HD) product MOTOROLA4 RT2770 0x9031 RT2770 product MOTOROLA4 RT3070 0x9032 RT3070 /* MpMan products */ product MPMAN MPF400_2 0x25a8 MPF400 Music Player 2Go product MPMAN MPF400_1 0x36d0 MPF400 Music Player 1Go /* MultiTech products */ product MULTITECH ATLAS 0xf101 MT5634ZBA-USB modem /* Mustek products */ product MUSTEK 1200CU 0x0001 1200 CU scanner product MUSTEK 600CU 0x0002 600 CU scanner product MUSTEK 1200USB 0x0003 1200 USB scanner product MUSTEK 1200UB 0x0006 1200 UB scanner product MUSTEK 1200USBPLUS 0x0007 1200 USB Plus scanner product MUSTEK 1200CUPLUS 0x0008 1200 CU Plus scanner product MUSTEK BEARPAW1200F 0x0010 BearPaw 1200F scanner product MUSTEK BEARPAW2400TA 0x0218 BearPaw 2400TA scanner product MUSTEK BEARPAW1200TA 0x021e BearPaw 1200TA scanner product MUSTEK 600USB 0x0873 600 USB scanner product MUSTEK MDC800 0xa800 MDC-800 digital camera /* M-Systems products */ product MSYSTEMS DISKONKEY 0x0010 DiskOnKey product MSYSTEMS DISKONKEY2 0x0011 DiskOnKey /* Myson products */ product MYSON HEDEN_8813 0x8813 USB-IDE product MYSON HEDEN 0x8818 USB-IDE product MYSON HUBREADER 0x8819 COMBO Card reader with USB HUB product MYSON STARREADER 0x9920 USB flash card adapter /* National Semiconductor */ product NATIONAL BEARPAW1200 0x1000 BearPaw 1200 product NATIONAL BEARPAW2400 0x1001 BearPaw 2400 /* NEC products */ product NEC HUB_0050 0x0050 USB 2.0 7-Port Hub product NEC HUB_005A 0x005a USB 2.0 4-Port Hub product NEC WL300NUG 0x0249 WL300NU-G product NEC HUB 0x55aa hub product NEC HUB_B 0x55ab hub /* NEODIO products */ product NEODIO ND3260 0x3260 8-in-1 Multi-format Flash Controller product NEODIO ND5010 0x5010 Multi-format Flash Controller /* Neotel products */ product NEOTEL PRIME 0x4000 Prime USB modem /* Netac products */ product NETAC CF_CARD 0x1060 USB-CF-Card product NETAC ONLYDISK 0x0003 OnlyDisk /* NetChip Technology Products */ product NETCHIP TURBOCONNECT 0x1080 Turbo-Connect product NETCHIP CLIK_40 0xa140 USB Clik! 
40 product NETCHIP GADGETZERO 0xa4a0 Linux Gadget Zero product NETCHIP ETHERNETGADGET 0xa4a2 Linux Ethernet/RNDIS gadget on pxa210/25x/26x product NETCHIP POCKETBOOK 0xa4a5 PocketBook /* Netgear products */ product NETGEAR EA101 0x1001 Ethernet product NETGEAR EA101X 0x1002 Ethernet product NETGEAR FA101 0x1020 Ethernet 10/100, USB1.1 product NETGEAR FA120 0x1040 USB 2.0 Ethernet product NETGEAR M4100 0x1100 M4100/M5300/M7100 series switch product NETGEAR WG111V1_2 0x4240 PrismGT USB 2.0 WLAN product NETGEAR WG111V3 0x4260 WG111v3 product NETGEAR WG111U 0x4300 WG111U product NETGEAR WG111U_NF 0x4301 WG111U (no firmware) product NETGEAR WG111V2 0x6a00 WG111V2 product NETGEAR WN111V2 0x9001 WN111V2 product NETGEAR WNDA3100 0x9010 WNDA3100 product NETGEAR WNDA4100 0x9012 WNDA4100 product NETGEAR WNDA3200 0x9018 WNDA3200 product NETGEAR RTL8192CU 0x9021 RTL8192CU product NETGEAR WNA1000 0x9040 WNA1000 product NETGEAR WNA1000M 0x9041 WNA1000M product NETGEAR2 MA101 0x4100 MA101 product NETGEAR2 MA101B 0x4102 MA101 Rev B product NETGEAR3 WG111T 0x4250 WG111T product NETGEAR3 WG111T_NF 0x4251 WG111T (no firmware) product NETGEAR3 WPN111 0x5f00 WPN111 product NETGEAR3 WPN111_NF 0x5f01 WPN111 (no firmware) product NETGEAR3 WPN111_2 0x5f02 WPN111 product NETGEAR4 RTL8188CU 0x9041 RTL8188CU /* NetIndex products */ product NETINDEX WS002IN 0x2001 Willcom WS002IN /* NEWlink */ product NEWLINK USB2IDEBRIDGE 0x00ff USB 2.0 Hard Drive Enclosure /* Nikon products */ product NIKON E990 0x0102 Digital Camera E990 product NIKON LS40 0x4000 CoolScan LS40 ED product NIKON D300 0x041a Digital Camera D300 /* NovaTech Products */ product NOVATECH NV902 0x9020 NovaTech NV-902W product NOVATECH RT2573 0x9021 RT2573 product NOVATECH RTL8188CU 0x9071 RTL8188CU /* Nokia products */ product NOKIA N958GB 0x0070 Nokia N95 8GBc product NOKIA2 CA42 0x1234 CA-42 cable /* Novatel Wireless products */ product NOVATEL V640 0x1100 Merlin V620 product NOVATEL CDMA_MODEM 0x1110 Novatel Wireless Merlin CDMA product NOVATEL V620 0x1110 Merlin V620 product NOVATEL V740 0x1120 Merlin V740 product NOVATEL V720 0x1130 Merlin V720 product NOVATEL U740 0x1400 Merlin U740 product NOVATEL U740_2 0x1410 Merlin U740 product NOVATEL U870 0x1420 Merlin U870 product NOVATEL XU870 0x1430 Merlin XU870 product NOVATEL X950D 0x1450 Merlin X950D product NOVATEL ES620 0x2100 Expedite ES620 product NOVATEL E725 0x2120 Expedite E725 product NOVATEL ES620_2 0x2130 Expedite ES620 product NOVATEL ES620 0x2100 ES620 CDMA product NOVATEL U720 0x2110 Merlin U720 product NOVATEL EU730 0x2400 Expedite EU730 product NOVATEL EU740 0x2410 Expedite EU740 product NOVATEL EU870D 0x2420 Expedite EU870D product NOVATEL U727 0x4100 Merlin U727 CDMA product NOVATEL MC950D 0x4400 Novatel MC950D HSUPA product NOVATEL MC990D 0x7001 Novatel MC990D product NOVATEL ZEROCD 0x5010 Novatel ZeroCD product NOVATEL MIFI2200V 0x5020 Novatel MiFi 2200 CDMA Virgin Mobile product NOVATEL ZEROCD2 0x5030 Novatel ZeroCD product NOVATEL MIFI2200 0x5041 Novatel MiFi 2200 CDMA product NOVATEL U727_2 0x5100 Merlin U727 CDMA product NOVATEL U760 0x6000 Novatel U760 product NOVATEL MC760 0x6002 Novatel MC760 product NOVATEL MC547 0x7042 Novatel MC547 product NOVATEL MC679 0x7031 Novatel MC679 product NOVATEL2 FLEXPACKGPS 0x0100 NovAtel FlexPack GPS receiver /* Merlin products */ product MERLIN V620 0x1110 Merlin V620 /* O2Micro products */ product O2MICRO OZ776_HUB 0x7761 OZ776 hub product O2MICRO OZ776_CCID_SC 0x7772 OZ776 CCID SC Reader /* Olimex products */ product OLIMEX ARM_USB_OCD 0x0003 
FTDI compatible adapter product OLIMEX ARM_USB_OCD_H 0x002b FTDI compatible adapter /* Olympus products */ product OLYMPUS C1 0x0102 C-1 Digital Camera product OLYMPUS C700 0x0105 C-700 Ultra Zoom /* OmniVision Technologies, Inc. products */ product OMNIVISION OV511 0x0511 OV511 Camera product OMNIVISION OV511PLUS 0xa511 OV511+ Camera /* OnSpec Electronic, Inc. */ product ONSPEC SDS_HOTFIND_D 0x0400 SDS-infrared.com Hotfind-D Infrared Camera product ONSPEC MDCFE_B_CF_READER 0xa000 MDCFE-B USB CF Reader product ONSPEC CFMS_RW 0xa001 SIIG/Datafab Memory Stick+CF Reader/Writer product ONSPEC READER 0xa003 Datafab-based Reader product ONSPEC CFSM_READER 0xa005 PNY/Datafab CF+SM Reader product ONSPEC CFSM_READER2 0xa006 Simple Tech/Datafab CF+SM Reader product ONSPEC MDSM_B_READER 0xa103 MDSM-B reader product ONSPEC CFSM_COMBO 0xa109 USB to CF + SM Combo (LC1) product ONSPEC UCF100 0xa400 FlashLink UCF-100 CompactFlash Reader product ONSPEC2 IMAGEMATE_SDDR55 0xa103 ImageMate SDDR55 /* Option products */ product OPTION VODAFONEMC3G 0x5000 Vodafone Mobile Connect 3G datacard product OPTION GT3G 0x6000 GlobeTrotter 3G datacard product OPTION GT3GQUAD 0x6300 GlobeTrotter 3G QUAD datacard product OPTION GT3GPLUS 0x6600 GlobeTrotter 3G+ datacard product OPTION GTICON322 0xd033 GlobeTrotter Icon322 storage product OPTION GTMAX36 0x6701 GlobeTrotter Max 3.6 Modem product OPTION GTMAX72 0x6711 GlobeTrotter Max 7.2 HSDPA product OPTION GTHSDPA 0x6971 GlobeTrotter HSDPA product OPTION GTMAXHSUPA 0x7001 GlobeTrotter HSUPA product OPTION GTMAXHSUPAE 0x6901 GlobeTrotter HSUPA PCIe product OPTION GTMAX380HSUPAE 0x7211 GlobeTrotter 380HSUPA PCIe product OPTION GT3G_1 0x6050 3G modem product OPTION GT3G_2 0x6100 3G modem product OPTION GT3G_3 0x6150 3G modem product OPTION GT3G_4 0x6200 3G modem product OPTION GT3G_5 0x6250 3G modem product OPTION GT3G_6 0x6350 3G modem product OPTION E6500 0x6500 3G modem product OPTION E6501 0x6501 3G modem product OPTION E6601 0x6601 3G modem product OPTION E6721 0x6721 3G modem product OPTION E6741 0x6741 3G modem product OPTION E6761 0x6761 3G modem product OPTION E6800 0x6800 3G modem product OPTION E7021 0x7021 3G modem product OPTION E7041 0x7041 3G modem product OPTION E7061 0x7061 3G modem product OPTION E7100 0x7100 3G modem product OPTION GTM380 0x7201 3G modem product OPTION GE40X 0x7601 Globetrotter HSUPA product OPTION GSICON72 0x6911 GlobeSurfer iCON product OPTION GSICONHSUPA 0x7251 Globetrotter HSUPA product OPTION ICON401 0x7401 GlobeSurfer iCON 401 product OPTION GTHSUPA 0x7011 Globetrotter HSUPA product OPTION GMT382 0x7501 Globetrotter HSUPA product OPTION GE40X_1 0x7301 Globetrotter HSUPA product OPTION GE40X_2 0x7361 Globetrotter HSUPA product OPTION GE40X_3 0x7381 Globetrotter HSUPA product OPTION GTM661W 0x9000 GTM661W product OPTION ICONEDGE 0xc031 GlobeSurfer iCON EDGE product OPTION MODHSXPA 0xd013 Globetrotter HSUPA product OPTION ICON321 0xd031 Globetrotter HSUPA product OPTION ICON505 0xd055 Globetrotter iCON 505 product OPTION ICON452 0x7901 Globetrotter iCON 452 /* Optoelectronics Co., Ltd */ product OPTO BARCODE 0x0001 Barcode Reader product OPTO OPTICONCODE 0x0009 Opticon Code Reader product OPTO BARCODE_1 0xa002 Barcode Reader product OPTO CRD7734 0xc000 USB Cradle CRD-7734-RU product OPTO CRD7734_1 0xc001 USB Cradle CRD-7734-RU /* OvisLink product */ product OVISLINK RT3072 0x3072 RT3072 /* OQO */ product OQO WIFI01 0x0002 model 01 WiFi interface product OQO BT01 0x0003 model 01 Bluetooth interface product OQO ETHER01PLUS 0x7720 model 01+ 
Ethernet product OQO ETHER01 0x8150 model 01 Ethernet interface /* Ours Technology Inc. */ product OTI DKU5 0x6858 DKU-5 Serial /* Owen.ru products */ product OWEN AC4 0x0004 AC4 USB-RS485 converter /* OWL products */ product OWL CM_160 0xca05 OWL CM-160 power monitor /* Palm Computing, Inc. product */ product PALM SERIAL 0x0080 USB Serial product PALM M500 0x0001 Palm m500 product PALM M505 0x0002 Palm m505 product PALM M515 0x0003 Palm m515 product PALM I705 0x0020 Palm i705 product PALM TUNGSTEN_Z 0x0031 Palm Tungsten Z product PALM M125 0x0040 Palm m125 product PALM M130 0x0050 Palm m130 product PALM TUNGSTEN_T 0x0060 Palm Tungsten T product PALM ZIRE31 0x0061 Palm Zire 31 product PALM ZIRE 0x0070 Palm Zire /* Panasonic products */ product PANASONIC LS120CAM 0x0901 LS-120 Camera product PANASONIC KXL840AN 0x0d01 CD-R Drive KXL-840AN product PANASONIC KXLRW32AN 0x0d09 CD-R Drive KXL-RW32AN product PANASONIC KXLCB20AN 0x0d0a CD-R Drive KXL-CB20AN product PANASONIC KXLCB35AN 0x0d0e DVD-ROM & CD-R/RW product PANASONIC SDCAAE 0x1b00 MultiMediaCard product PANASONIC TYTP50P6S 0x3900 TY-TP50P6-S 50in Touch Panel /* Papouch products */ product PAPOUCH AD4USB 0x8003 FTDI compatible adapter product PAPOUCH AP485 0x0101 FTDI compatible adapter product PAPOUCH AP485_2 0x0104 FTDI compatible adapter product PAPOUCH DRAK5 0x0700 FTDI compatible adapter product PAPOUCH DRAK6 0x1000 FTDI compatible adapter product PAPOUCH GMSR 0x8005 FTDI compatible adapter product PAPOUCH GMUX 0x8004 FTDI compatible adapter product PAPOUCH IRAMP 0x0500 FTDI compatible adapter product PAPOUCH LEC 0x0300 FTDI compatible adapter product PAPOUCH MU 0x8001 FTDI compatible adapter product PAPOUCH QUIDO10X1 0x0b00 FTDI compatible adapter product PAPOUCH QUIDO2X16 0x0e00 FTDI compatible adapter product PAPOUCH QUIDO2X2 0x0a00 FTDI compatible adapter product PAPOUCH QUIDO30X3 0x0c00 FTDI compatible adapter product PAPOUCH QUIDO3X32 0x0f00 FTDI compatible adapter product PAPOUCH QUIDO4X4 0x0900 FTDI compatible adapter product PAPOUCH QUIDO60X3 0x0d00 FTDI compatible adapter product PAPOUCH QUIDO8X8 0x0800 FTDI compatible adapter product PAPOUCH SB232 0x0301 FTDI compatible adapter product PAPOUCH SB422 0x0102 FTDI compatible adapter product PAPOUCH SB422_2 0x0105 FTDI compatible adapter product PAPOUCH SB485 0x0100 FTDI compatible adapter product PAPOUCH SB485C 0x0107 FTDI compatible adapter product PAPOUCH SB485S 0x0106 FTDI compatible adapter product PAPOUCH SB485_2 0x0103 FTDI compatible adapter product PAPOUCH SIMUKEY 0x8002 FTDI compatible adapter product PAPOUCH TMU 0x0400 FTDI compatible adapter product PAPOUCH UPSUSB 0x8000 FTDI compatible adapter /* PARA Industrial products */ product PARA RT3070 0x8888 RT3070 /* Simtec Electronics products */ product SIMTEC ENTROPYKEY 0x0001 Entropy Key /* Pegatron products */ product PEGATRON RT2870 0x0002 RT2870 product PEGATRON RT3070 0x000c RT3070 product PEGATRON RT3070_2 0x000e RT3070 product PEGATRON RT3070_3 0x0010 RT3070 /* Peracom products */ product PERACOM SERIAL1 0x0001 Serial product PERACOM ENET 0x0002 Ethernet product PERACOM ENET3 0x0003 At Home Ethernet product PERACOM ENET2 0x0005 Ethernet /* Philips products */ product PHILIPS DSS350 0x0101 DSS 350 Digital Speaker System product PHILIPS DSS 0x0104 DSS XXX Digital Speaker System product PHILIPS HUB 0x0201 hub product PHILIPS PCA646VC 0x0303 PCA646VC PC Camera product PHILIPS PCVC680K 0x0308 PCVC680K Vesta Pro PC Camera product PHILIPS DSS150 0x0471 DSS 150 Digital Speaker System product PHILIPS ACE1001 0x066a
AKTAKOM ACE-1001 cable product PHILIPS SPE3030CC 0x083a USB 2.0 External Disk product PHILIPS SNU5600 0x1236 SNU5600 product PHILIPS UM10016 0x1552 ISP 1581 Hi-Speed USB MPEG2 Encoder Reference Kit product PHILIPS DIVAUSB 0x1801 DIVA USB mp3 player product PHILIPS RT2870 0x200f RT2870 /* Philips Semiconductor products */ product PHILIPSSEMI HUB1122 0x1122 HUB /* Megatec */ product MEGATEC UPS 0x5161 Phoenixtec protocol based UPS /* P.I. Engineering products */ product PIENGINEERING PS2USB 0x020b PS2 to Mac USB Adapter /* Planex Communications products */ product PLANEX GW_US11H 0x14ea GW-US11H WLAN product PLANEX2 RTL8188CUS 0x1201 RTL8188CUS product PLANEX2 GW_US11S 0x3220 GW-US11S WLAN product PLANEX2 GW_US54GXS 0x5303 GW-US54GXS WLAN product PLANEX2 GW_US300 0x5304 GW-US300 product PLANEX2 RTL8188CU_1 0xab2a RTL8188CU product PLANEX2 RTL8188CU_2 0xed17 RTL8188CU product PLANEX2 RTL8188CU_3 0x4902 RTL8188CU product PLANEX2 RTL8188CU_4 0xab2e RTL8188CU product PLANEX2 RTL8192CU 0xab2b RTL8192CU product PLANEX2 GWUS54HP 0xab01 GW-US54HP product PLANEX2 GWUS300MINIS 0xab24 GW-US300MiniS product PLANEX2 RT3070 0xab25 RT3070 product PLANEX2 MZKUE150N 0xab2f MZK-UE150N product PLANEX2 GWUS54MINI2 0xab50 GW-US54Mini2 product PLANEX2 GWUS54SG 0xc002 GW-US54SG product PLANEX2 GWUS54GZL 0xc007 GW-US54GZL product PLANEX2 GWUS54GD 0xed01 GW-US54GD product PLANEX2 GWUSMM 0xed02 GW-USMM product PLANEX2 RT2870 0xed06 RT2870 product PLANEX2 GWUSMICRON 0xed14 GW-USMicroN product PLANEX2 GWUSVALUEEZ 0xed17 GW-USValue-EZ product PLANEX3 GWUS54GZ 0xab10 GW-US54GZ product PLANEX3 GU1000T 0xab11 GU-1000T product PLANEX3 GWUS54MINI 0xab13 GW-US54Mini product PLANEX2 GWUSNANO 0xab28 GW-USNano /* Plextor Corp. */ product PLEXTOR 40_12_40U 0x0011 PlexWriter 40/12/40U /* PLX products */ product PLX TESTBOARD 0x9060 test board product PLX CA42 0xac70 CA-42 /* PNY products */ product PNY ATTACHE2 0x0010 USB 2.0 Flash Drive /* PortGear products */ product PORTGEAR EA8 0x0008 Ethernet product PORTGEAR EA9 0x0009 Ethernet /* Portsmith products */ product PORTSMITH EEA 0x3003 Express Ethernet /* Posiflex products */ product POSIFLEX PP7000 0x0300 FTDI compatible adapter /* Primax products */ product PRIMAX G2X300 0x0300 G2-200 scanner product PRIMAX G2E300 0x0301 G2E-300 scanner product PRIMAX G2300 0x0302 G2-300 scanner product PRIMAX G2E3002 0x0303 G2E-300 scanner product PRIMAX 9600 0x0340 Colorado USB 9600 scanner product PRIMAX 600U 0x0341 Colorado 600u scanner product PRIMAX 6200 0x0345 Visioneer 6200 scanner product PRIMAX 19200 0x0360 Colorado USB 19200 scanner product PRIMAX 1200U 0x0361 Colorado 1200u scanner product PRIMAX G600 0x0380 G2-600 scanner product PRIMAX 636I 0x0381 ReadyScan 636i product PRIMAX G2600 0x0382 G2-600 scanner product PRIMAX G2E600 0x0383 G2E-600 scanner product PRIMAX COMFORT 0x4d01 Comfort product PRIMAX MOUSEINABOX 0x4d02 Mouse-in-a-Box product PRIMAX PCGAUMS1 0x4d04 Sony PCGA-UMS1 product PRIMAX HP_RH304AA 0x4d17 HP RH304AA mouse /* Prolific products */ product PROLIFIC PL2301 0x0000 PL2301 Host-Host interface product PROLIFIC PL2302 0x0001 PL2302 Host-Host interface product PROLIFIC MOTOROLA 0x0307 Motorola Cable product PROLIFIC RSAQ2 0x04bb PL2303 Serial (IODATA USB-RSAQ2) product PROLIFIC ALLTRONIX_GPRS 0x0609 Alltronix ACM003U00 modem product PROLIFIC ALDIGA_AL11U 0x0611 AlDiga AL-11U modem product PROLIFIC MICROMAX_610U 0x0612 Micromax 610U product PROLIFIC DCU11 0x1234 DCU-11 Phone Cable product PROLIFIC UIC_MSR206 0x206a UIC MSR206 Card Reader product PROLIFIC PL2303 0x2303 
PL2303 Serial (ATEN/IOGEAR UC232A) product PROLIFIC PL2305 0x2305 Parallel printer product PROLIFIC ATAPI4 0x2307 ATAPI-4 Controller product PROLIFIC PL2501 0x2501 PL2501 Host-Host interface product PROLIFIC PL2506 0x2506 PL2506 USB to IDE Bridge product PROLIFIC HCR331 0x331a HCR331 Hybrid Card Reader product PROLIFIC PHAROS 0xaaa0 Prolific Pharos product PROLIFIC RSAQ3 0xaaa2 PL2303 Serial Adapter (IODATA USB-RSAQ3) product PROLIFIC2 PL2303 0x2303 PL2303 Serial Adapter /* Putercom products */ product PUTERCOM UPA100 0x047e USB-1284 BRIDGE /* Qcom products */ product QCOM RT2573 0x6196 RT2573 product QCOM RT2573_2 0x6229 RT2573 product QCOM RT2573_3 0x6238 RT2573 product QCOM RT2870 0x6259 RT2870 /* QI-hardware */ product QIHARDWARE JTAGSERIAL 0x0713 FTDI compatible adapter /* Qisda products */ product QISDA H21_1 0x4512 3G modem product QISDA H21_2 0x4523 3G modem product QISDA H20_1 0x4515 3G modem product QISDA H20_2 0x4519 3G modem /* Qualcomm products */ product QUALCOMM CDMA_MSM 0x6000 CDMA Technologies MSM phone product QUALCOMM NTT_L02C_MODEM 0x618f NTT DOCOMO L-02C product QUALCOMM NTT_L02C_STORAGE 0x61dd NTT DOCOMO L-02C product QUALCOMM2 MF330 0x6613 MF330 product QUALCOMM2 RWT_FCT 0x3100 RWT FCT-CDMA 2000 1xRTT modem product QUALCOMM2 CDMA_MSM 0x3196 CDMA Technologies MSM modem product QUALCOMM2 AC8700 0x6000 AC8700 product QUALCOMM2 VW110L 0x1000 Vertex Wireless 110L modem product QUALCOMM2 SIM5218 0x9000 SIM5218 product QUALCOMM2 WM620 0x9002 Neoway WM620 product QUALCOMM2 GOBI2000_QDL 0x9204 Qualcomm Gobi 2000 QDL product QUALCOMM2 GOBI2000 0x9205 Qualcomm Gobi 2000 modem product QUALCOMM2 VT80N 0x6500 Venus VT80N product QUALCOMM3 VFAST2 0x9909 Venus Fast2 modem product QUALCOMMINC CDMA_MSM 0x0001 CDMA Technologies MSM modem product QUALCOMMINC E0002 0x0002 3G modem product QUALCOMMINC E0003 0x0003 3G modem product QUALCOMMINC E0004 0x0004 3G modem product QUALCOMMINC E0005 0x0005 3G modem product QUALCOMMINC E0006 0x0006 3G modem product QUALCOMMINC E0007 0x0007 3G modem product QUALCOMMINC E0008 0x0008 3G modem product QUALCOMMINC E0009 0x0009 3G modem product QUALCOMMINC E000A 0x000a 3G modem product QUALCOMMINC E000B 0x000b 3G modem product QUALCOMMINC E000C 0x000c 3G modem product QUALCOMMINC E000D 0x000d 3G modem product QUALCOMMINC E000E 0x000e 3G modem product QUALCOMMINC E000F 0x000f 3G modem product QUALCOMMINC E0010 0x0010 3G modem product QUALCOMMINC E0011 0x0011 3G modem product QUALCOMMINC E0012 0x0012 3G modem product QUALCOMMINC E0013 0x0013 3G modem product QUALCOMMINC E0014 0x0014 3G modem product QUALCOMMINC MF628 0x0015 3G modem product QUALCOMMINC MF633R 0x0016 ZTE WCDMA modem product QUALCOMMINC E0017 0x0017 3G modem product QUALCOMMINC E0018 0x0018 3G modem product QUALCOMMINC E0019 0x0019 3G modem product QUALCOMMINC E0020 0x0020 3G modem product QUALCOMMINC E0021 0x0021 3G modem product QUALCOMMINC E0022 0x0022 3G modem product QUALCOMMINC E0023 0x0023 3G modem product QUALCOMMINC E0024 0x0024 3G modem product QUALCOMMINC E0025 0x0025 3G modem product QUALCOMMINC E0026 0x0026 3G modem product QUALCOMMINC E0027 0x0027 3G modem product QUALCOMMINC E0028 0x0028 3G modem product QUALCOMMINC E0029 0x0029 3G modem product QUALCOMMINC E0030 0x0030 3G modem product QUALCOMMINC MF626 0x0031 3G modem product QUALCOMMINC E0032 0x0032 3G modem product QUALCOMMINC E0033 0x0033 3G modem product QUALCOMMINC E0037 0x0037 3G modem product QUALCOMMINC E0039 0x0039 3G modem product QUALCOMMINC E0042 0x0042 3G modem product QUALCOMMINC E0043 0x0043 3G modem product 
QUALCOMMINC E0048 0x0048 3G modem product QUALCOMMINC E0049 0x0049 3G modem product QUALCOMMINC E0051 0x0051 3G modem product QUALCOMMINC E0052 0x0052 3G modem product QUALCOMMINC ZTE_STOR2 0x0053 USB ZTE Storage product QUALCOMMINC E0054 0x0054 3G modem product QUALCOMMINC E0055 0x0055 3G modem product QUALCOMMINC E0057 0x0057 3G modem product QUALCOMMINC E0058 0x0058 3G modem product QUALCOMMINC E0059 0x0059 3G modem product QUALCOMMINC E0060 0x0060 3G modem product QUALCOMMINC E0061 0x0061 3G modem product QUALCOMMINC E0062 0x0062 3G modem product QUALCOMMINC E0063 0x0063 3G modem product QUALCOMMINC E0064 0x0064 3G modem product QUALCOMMINC E0066 0x0066 3G modem product QUALCOMMINC E0069 0x0069 3G modem product QUALCOMMINC E0070 0x0070 3G modem product QUALCOMMINC E0073 0x0073 3G modem product QUALCOMMINC E0076 0x0076 3G modem product QUALCOMMINC E0078 0x0078 3G modem product QUALCOMMINC E0082 0x0082 3G modem product QUALCOMMINC E0086 0x0086 3G modem product QUALCOMMINC MF112 0x0103 3G modem product QUALCOMMINC SURFSTICK 0x0117 1&1 Surf Stick product QUALCOMMINC K3772_Z_INIT 0x1179 K3772-Z Initial product QUALCOMMINC K3772_Z 0x1181 K3772-Z product QUALCOMMINC ZTE_MF730M 0x1420 3G modem product QUALCOMMINC MF195E_INIT 0x1514 MF195E initial product QUALCOMMINC MF195E 0x1516 MF195E product QUALCOMMINC ZTE_STOR 0x2000 USB ZTE Storage product QUALCOMMINC E2002 0x2002 3G modem product QUALCOMMINC E2003 0x2003 3G modem product QUALCOMMINC AC682 0xffdd CDMA 1xEVDO USB modem product QUALCOMMINC AC682_INIT 0xffde CDMA 1xEVDO USB modem (initial) product QUALCOMMINC AC8710 0xfff1 3G modem product QUALCOMMINC AC2726 0xfff5 3G modem product QUALCOMMINC AC8700 0xfffe CDMA 1xEVDO USB modem /* Quanta products */ product QUANTA RW6815_1 0x00ce HP iPAQ rw6815 product QUANTA RT3070 0x0304 RT3070 product QUANTA Q101_STOR 0x1000 USB Q101 Storage product QUANTA Q101 0xea02 HSDPA modem product QUANTA Q111 0xea03 HSDPA modem product QUANTA GLX 0xea04 HSDPA modem product QUANTA GKE 0xea05 HSDPA modem product QUANTA GLE 0xea06 HSDPA modem product QUANTA RW6815R 0xf003 HP iPAQ rw6815 RNDIS /* Qtronix products */ product QTRONIX 980N 0x2011 Scorpion-980N keyboard /* Quickshot products */ product QUICKSHOT STRIKEPAD 0x6238 USB StrikePad /* Radio Shack */ product RADIOSHACK USBCABLE 0x4026 USB to Serial Cable /* Rainbow Technologies products */ product RAINBOW IKEY2000 0x1200 i-Key 2000 /* Ralink Technology products */ product RALINK RT2570 0x1706 RT2500USB Wireless Adapter product RALINK RT2070 0x2070 RT2070 product RALINK RT2570_2 0x2570 RT2500USB Wireless Adapter product RALINK RT2573 0x2573 RT2501USB Wireless Adapter product RALINK RT2671 0x2671 RT2601USB Wireless Adapter product RALINK RT2770 0x2770 RT2770 product RALINK RT2870 0x2870 RT2870 product RALINK RT_STOR 0x2878 USB Storage product RALINK RT3070 0x3070 RT3070 product RALINK RT3071 0x3071 RT3071 product RALINK RT3072 0x3072 RT3072 product RALINK RT3370 0x3370 RT3370 product RALINK RT3572 0x3572 RT3572 product RALINK RT3573 0x3573 RT3573 product RALINK RT5370 0x5370 RT5370 product RALINK RT5572 0x5572 RT5572 product RALINK RT8070 0x8070 RT8070 product RALINK RT2570_3 0x9020 RT2500USB Wireless Adapter product RALINK RT2573_2 0x9021 RT2501USB Wireless Adapter /* RATOC Systems products */ product RATOC REXUSB60 0xb000 USB serial adapter REX-USB60 product RATOC REXUSB60F 0xb020 USB serial adapter REX-USB60F /* Realtek products */ /* Green House and CompUSA OEM this part */ product REALTEK DUMMY 0x0000 Dummy product product REALTEK USB20CRW 0x0158 USB20CRW 
Card Reader product REALTEK RTL8188ETV 0x0179 RTL8188ETV product REALTEK RTL8188CTV 0x018a RTL8188CTV product REALTEK USBKR100 0x8150 USBKR100 USB Ethernet product REALTEK RTL8152 0x8152 RTL8152 USB Ethernet product REALTEK RTL8153 0x8153 RTL8153 USB Ethernet product REALTEK RTL8188CE_0 0x8170 RTL8188CE product REALTEK RTL8171 0x8171 RTL8171 product REALTEK RTL8172 0x8172 RTL8172 product REALTEK RTL8173 0x8173 RTL8173 product REALTEK RTL8174 0x8174 RTL8174 product REALTEK RTL8188CU_0 0x8176 RTL8188CU product REALTEK RTL8188EU 0x8179 RTL8188EU product REALTEK RTL8188CE_1 0x817e RTL8188CE product REALTEK RTL8188CU_1 0x817a RTL8188CU product REALTEK RTL8188CU_2 0x817b RTL8188CU product REALTEK RTL8187 0x8187 RTL8187 Wireless Adapter product REALTEK RTL8187B_0 0x8189 RTL8187B Wireless Adapter product REALTEK RTL8188CU_3 0x8191 RTL8188CU product REALTEK RTL8196EU 0x8196 RTL8196EU product REALTEK RTL8187B_1 0x8197 RTL8187B Wireless Adapter product REALTEK RTL8187B_2 0x8198 RTL8187B Wireless Adapter product REALTEK RTL8188CUS 0x818a RTL8188CUS product REALTEK RTL8188CU_COMBO 0x8754 RTL8188CU product REALTEK RTL8191CU 0x8177 RTL8191CU product REALTEK RTL8192CU 0x8178 RTL8192CU product REALTEK RTL8192CE 0x817c RTL8192CE product REALTEK RTL8188RU_1 0x817d RTL8188RU product REALTEK RTL8188RU_3 0x817f RTL8188RU product REALTEK RTL8712 0x8712 RTL8712 product REALTEK RTL8713 0x8712 RTL8713 product REALTEK RTL8188RU_2 0x317f RTL8188RU product REALTEK RTL8192SU 0xc512 RTL8192SU /* RedOctane products */ product REDOCTANE DUMMY 0x0000 Dummy product product REDOCTANE GHMIDI 0x474b GH MIDI INTERFACE /* Renesas products */ product RENESAS RX610 0x0053 RX610 RX-Stick /* Ricoh products */ product RICOH VGPVCC2 0x1830 VGP-VCC2 Camera product RICOH VGPVCC3 0x1832 VGP-VCC3 Camera product RICOH VGPVCC2_2 0x1833 VGP-VCC2 Camera product RICOH VGPVCC2_3 0x1834 VGP-VCC2 Camera product RICOH VGPVCC7 0x183a VGP-VCC7 Camera product RICOH VGPVCC8 0x183b VGP-VCC8 Camera /* Reiner-SCT products */ product REINERSCT CYBERJACK_ECOM 0x0100 e-com cyberJack /* Roland products */ product ROLAND UA100 0x0000 UA-100 Audio I/F product ROLAND UM4 0x0002 UM-4 MIDI I/F product ROLAND SC8850 0x0003 SC-8850 MIDI Synth product ROLAND U8 0x0004 U-8 Audio I/F product ROLAND UM2 0x0005 UM-2 MIDI I/F product ROLAND SC8820 0x0007 SC-8820 MIDI Synth product ROLAND PC300 0x0008 PC-300 MIDI Keyboard product ROLAND UM1 0x0009 UM-1 MIDI I/F product ROLAND SK500 0x000b SK-500 MIDI Keyboard product ROLAND SCD70 0x000c SC-D70 MIDI Synth product ROLAND UM880N 0x0014 EDIROL UM-880 MIDI I/F (native) product ROLAND UM880G 0x0015 EDIROL UM-880 MIDI I/F (generic) product ROLAND SD90 0x0016 SD-90 MIDI Synth product ROLAND UM550 0x0023 UM-550 MIDI I/F product ROLAND SD20 0x0027 SD-20 MIDI Synth product ROLAND SD80 0x0029 SD-80 MIDI Synth product ROLAND UA700 0x002b UA-700 Audio I/F /* Rockfire products */ product ROCKFIRE GAMEPAD 0x2033 gamepad 203USB /* RATOC Systems products */ product RATOC REXUSB60 0xb000 REX-USB60 product RATOC REXUSB60F 0xb020 REX-USB60F /* RT system products */ product RTSYSTEMS CT29B 0x9e54 FTDI compatible adapter product RTSYSTEMS SERIAL_VX7 0x9e52 FTDI compatible adapter /* Sagem products */ product SAGEM USBSERIAL 0x0027 USB-Serial Controller product SAGEM XG760A 0x004a XG-760A product SAGEM XG76NA 0x0062 XG-76NA /* Samsung products */ product SAMSUNG WIS09ABGN 0x2018 WIS09ABGN Wireless LAN adapter product SAMSUNG ML6060 0x3008 ML-6060 laser printer product SAMSUNG YP_U2 0x5050 YP-U2 MP3 Player product SAMSUNG YP_U4 0x5092 YP-U4 MP3 
Player product SAMSUNG I500 0x6601 I500 Palm USB Phone product SAMSUNG I330 0x8001 I330 phone cradle product SAMSUNG2 RT2870_1 0x2018 RT2870 /* Samsung Techwin products */ product SAMSUNG_TECHWIN DIGIMAX_410 0x000a Digimax 410 /* SanDisk products */ product SANDISK SDDR05A 0x0001 ImageMate SDDR-05a product SANDISK SDDR31 0x0002 ImageMate SDDR-31 product SANDISK SDDR05 0x0005 ImageMate SDDR-05 product SANDISK SDDR12 0x0100 ImageMate SDDR-12 product SANDISK SDDR09 0x0200 ImageMate SDDR-09 product SANDISK SDDR75 0x0810 ImageMate SDDR-75 product SANDISK SDCZ2_128 0x7100 Cruzer Mini 128MB product SANDISK SDCZ2_256 0x7104 Cruzer Mini 256MB product SANDISK SDCZ4_128 0x7112 Cruzer Micro 128MB product SANDISK SDCZ4_256 0x7113 Cruzer Micro 256MB product SANDISK IMAGEMATE_SDDR289 0xb6ba ImageMate SDDR-289 /* Sanwa Electric Instrument Co., Ltd. products */ product SANWA KB_USB2 0x0701 KB-USB2 multimeter cable /* Sanyo Electric products */ product SANYO SCP4900 0x0701 Sanyo SCP-4900 USB Phone /* ScanLogic products */ product SCANLOGIC SL11R 0x0002 SL11R IDE Adapter product SCANLOGIC 336CX 0x0300 Phantom 336CX - C3 scanner /* Schweitzer Engineering Laboratories products */ product SEL C662 0x0001 C662 Cable /* Sealevel products */ product SEALEVEL 2101 0x2101 FTDI compatible adapter product SEALEVEL 2102 0x2102 FTDI compatible adapter product SEALEVEL 2103 0x2103 FTDI compatible adapter product SEALEVEL 2104 0x2104 FTDI compatible adapter product SEALEVEL 2106 0x9020 FTDI compatible adapter product SEALEVEL 2201_1 0x2211 FTDI compatible adapter product SEALEVEL 2201_2 0x2221 FTDI compatible adapter product SEALEVEL 2202_1 0x2212 FTDI compatible adapter product SEALEVEL 2202_2 0x2222 FTDI compatible adapter product SEALEVEL 2203_1 0x2213 FTDI compatible adapter product SEALEVEL 2203_2 0x2223 FTDI compatible adapter product SEALEVEL 2401_1 0x2411 FTDI compatible adapter product SEALEVEL 2401_2 0x2421 FTDI compatible adapter product SEALEVEL 2401_3 0x2431 FTDI compatible adapter product SEALEVEL 2401_4 0x2441 FTDI compatible adapter product SEALEVEL 2402_1 0x2412 FTDI compatible adapter product SEALEVEL 2402_2 0x2422 FTDI compatible adapter product SEALEVEL 2402_3 0x2432 FTDI compatible adapter product SEALEVEL 2402_4 0x2442 FTDI compatible adapter product SEALEVEL 2403_1 0x2413 FTDI compatible adapter product SEALEVEL 2403_2 0x2423 FTDI compatible adapter product SEALEVEL 2403_3 0x2433 FTDI compatible adapter product SEALEVEL 2403_4 0x2443 FTDI compatible adapter product SEALEVEL 2801_1 0x2811 FTDI compatible adapter product SEALEVEL 2801_2 0x2821 FTDI compatible adapter product SEALEVEL 2801_3 0x2831 FTDI compatible adapter product SEALEVEL 2801_4 0x2841 FTDI compatible adapter product SEALEVEL 2801_5 0x2851 FTDI compatible adapter product SEALEVEL 2801_6 0x2861 FTDI compatible adapter product SEALEVEL 2801_7 0x2871 FTDI compatible adapter product SEALEVEL 2801_8 0x2881 FTDI compatible adapter product SEALEVEL 2802_1 0x2812 FTDI compatible adapter product SEALEVEL 2802_2 0x2822 FTDI compatible adapter product SEALEVEL 2802_3 0x2832 FTDI compatible adapter product SEALEVEL 2802_4 0x2842 FTDI compatible adapter product SEALEVEL 2802_5 0x2852 FTDI compatible adapter product SEALEVEL 2802_6 0x2862 FTDI compatible adapter product SEALEVEL 2802_7 0x2872 FTDI compatible adapter product SEALEVEL 2802_8 0x2882 FTDI compatible adapter product SEALEVEL 2803_1 0x2813 FTDI compatible adapter product SEALEVEL 2803_2 0x2823 FTDI compatible adapter product SEALEVEL 2803_3 0x2833 FTDI compatible adapter product SEALEVEL 
2803_4 0x2843 FTDI compatible adapter product SEALEVEL 2803_5 0x2853 FTDI compatible adapter product SEALEVEL 2803_6 0x2863 FTDI compatible adapter product SEALEVEL 2803_7 0x2873 FTDI compatible adapter product SEALEVEL 2803_8 0x2883 FTDI compatible adapter /* Senao products */ product SENAO RT2870_3 0x0605 RT2870 product SENAO RT2870_4 0x0615 RT2870 product SENAO NUB8301 0x2000 NUB-8301 product SENAO RT2870_1 0x9701 RT2870 product SENAO RT2870_2 0x9702 RT2870 product SENAO RT3070 0x9703 RT3070 product SENAO RT3071 0x9705 RT3071 product SENAO RT3072_1 0x9706 RT3072 product SENAO RT3072_2 0x9707 RT3072 product SENAO RT3072_3 0x9708 RT3072 product SENAO RT3072_4 0x9709 RT3072 product SENAO RT3072_5 0x9801 RT3072 product SENAO RTL8192SU_1 0x9603 RTL8192SU product SENAO RTL8192SU_2 0x9605 RTL8192SU /* ShanTou products */ product SHANTOU ST268 0x0268 ST268 product SHANTOU DM9601 0x9601 DM 9601 product SHANTOU ADM8515 0x8515 ADM8515 /* Shark products */ product SHARK PA 0x0400 Pocket Adapter /* Sharp products */ product SHARP SL5500 0x8004 Zaurus SL-5500 PDA product SHARP SLA300 0x8005 Zaurus SL-A300 PDA product SHARP SL5600 0x8006 Zaurus SL-5600 PDA product SHARP SLC700 0x8007 Zaurus SL-C700 PDA product SHARP SLC750 0x9031 Zaurus SL-C750 PDA product SHARP WZERO3ES 0x9123 W-ZERO3 ES Smartphone product SHARP WZERO3ADES 0x91ac Advanced W-ZERO3 ES Smartphone product SHARP WILLCOM03 0x9242 WILLCOM03 /* Shuttle Technology products */ product SHUTTLE EUSB 0x0001 E-USB Bridge product SHUTTLE EUSCSI 0x0002 eUSCSI Bridge product SHUTTLE SDDR09 0x0003 ImageMate SDDR09 product SHUTTLE EUSBCFSM 0x0005 eUSB SmartMedia / CompactFlash Adapter product SHUTTLE ZIOMMC 0x0006 eUSB MultiMediaCard Adapter product SHUTTLE HIFD 0x0007 Sony Hifd product SHUTTLE EUSBATAPI 0x0009 eUSB ATA/ATAPI Adapter product SHUTTLE CF 0x000a eUSB CompactFlash Adapter product SHUTTLE EUSCSI_B 0x000b eUSCSI Bridge product SHUTTLE EUSCSI_C 0x000c eUSCSI Bridge product SHUTTLE CDRW 0x0101 CD-RW Device product SHUTTLE EUSBORCA 0x0325 eUSB ORCA Quad Reader /* Siemens products */ product SIEMENS SPEEDSTREAM 0x1001 SpeedStream product SIEMENS SPEEDSTREAM22 0x1022 SpeedStream 1022 product SIEMENS2 WLL013 0x001b WLL013 product SIEMENS2 ES75 0x0034 GSM module MC35 product SIEMENS2 WL54G 0x3c06 54g USB Network Adapter product SIEMENS3 SX1 0x0001 SX1 product SIEMENS3 X65 0x0003 X65 product SIEMENS3 X75 0x0004 X75 product SIEMENS3 EF81 0x0005 EF81 /* Sierra Wireless products */ product SIERRA EM5625 0x0017 EM5625 product SIERRA MC5720_2 0x0018 MC5720 product SIERRA MC5725 0x0020 MC5725 product SIERRA AIRCARD580 0x0112 Sierra Wireless AirCard 580 product SIERRA AIRCARD595 0x0019 Sierra Wireless AirCard 595 product SIERRA AC595U 0x0120 Sierra Wireless AirCard 595U product SIERRA AC597E 0x0021 Sierra Wireless AirCard 597E product SIERRA EM5725 0x0022 EM5725 product SIERRA C597 0x0023 Sierra Wireless Compass 597 product SIERRA MC5727 0x0024 MC5727 product SIERRA T598 0x0025 T598 product SIERRA T11 0x0026 T11 product SIERRA AC402 0x0027 AC402 product SIERRA MC5728 0x0028 MC5728 product SIERRA E0029 0x0029 E0029 product SIERRA AIRCARD580 0x0112 Sierra Wireless AirCard 580 product SIERRA AC595U 0x0120 Sierra Wireless AirCard 595U product SIERRA MC5720 0x0218 MC5720 Wireless Modem product SIERRA MINI5725 0x0220 Sierra Wireless miniPCI 5275 product SIERRA MC5727_2 0x0224 MC5727 product SIERRA MC8755_2 0x6802 MC8755 product SIERRA MC8765 0x6803 MC8765 product SIERRA MC8755 0x6804 MC8755 product SIERRA MC8765_2 0x6805 MC8765 product SIERRA MC8755_4 0x6808 
MC8755 product SIERRA MC8765_3 0x6809 MC8765 product SIERRA AC875U 0x6812 AC875U HSDPA USB Modem product SIERRA MC8755_3 0x6813 MC8755 HSDPA product SIERRA MC8775_2 0x6815 MC8775 product SIERRA MC8775 0x6816 MC8775 product SIERRA AC875 0x6820 Sierra Wireless AirCard 875 product SIERRA AC875U_2 0x6821 AC875U product SIERRA AC875E 0x6822 AC875E product SIERRA MC8780 0x6832 MC8780 product SIERRA MC8781 0x6833 MC8781 product SIERRA MC8780_2 0x6834 MC8780 product SIERRA MC8781_2 0x6835 MC8781 product SIERRA MC8780_3 0x6838 MC8780 product SIERRA MC8781_3 0x6839 MC8781 product SIERRA MC8785 0x683A MC8785 product SIERRA MC8785_2 0x683B MC8785 product SIERRA MC8790 0x683C MC8790 product SIERRA MC8791 0x683D MC8791 product SIERRA MC8792 0x683E MC8792 product SIERRA AC880 0x6850 Sierra Wireless AirCard 880 product SIERRA AC881 0x6851 Sierra Wireless AirCard 881 product SIERRA AC880E 0x6852 Sierra Wireless AirCard 880E product SIERRA AC881E 0x6853 Sierra Wireless AirCard 881E product SIERRA AC880U 0x6855 Sierra Wireless AirCard 880U product SIERRA AC881U 0x6856 Sierra Wireless AirCard 881U product SIERRA AC885E 0x6859 AC885E product SIERRA AC885E_2 0x685A AC885E product SIERRA AC885U 0x6880 Sierra Wireless AirCard 885U product SIERRA C888 0x6890 C888 product SIERRA C22 0x6891 C22 product SIERRA E6892 0x6892 E6892 product SIERRA E6893 0x6893 E6893 product SIERRA MC8700 0x68A3 MC8700 product SIERRA MC7354 0x68C0 MC7354 product SIERRA MC7355 0x9041 MC7355 product SIERRA AC313U 0x68aa Sierra Wireless AirCard 313U product SIERRA TRUINSTALL 0x0fff Aircard Tru Installer /* Sigmatel products */ product SIGMATEL WBT_3052 0x4200 WBT-3052 IrDA/USB Bridge product SIGMATEL I_BEAD100 0x8008 i-Bead 100 MP3 Player /* SIIG products */ /* Also: Omnidirectional Control Technology products */ product SIIG DIGIFILMREADER 0x0004 DigiFilm-Combo Reader product SIIG WINTERREADER 0x0330 WINTERREADER Reader product SIIG2 DK201 0x0103 FTDI compatible adapter product SIIG2 USBTOETHER 0x0109 USB TO Ethernet product SIIG2 US2308 0x0421 Serial /* Silicom products */ product SILICOM U2E 0x0001 U2E product SILICOM GPE 0x0002 Psion Gold Port Ethernet /* SI Labs */ product SILABS VSTABI 0x0f91 VStabi Controller product SILABS ARKHAM_DS101_M 0x1101 Arkham DS101 Monitor product SILABS ARKHAM_DS101_A 0x1601 Arkham DS101 Adapter product SILABS BSM7DUSB 0x800a SPORTident BSM7-D USB product SILABS POLOLU 0x803b Pololu Serial product SILABS CYGNAL_DEBUG 0x8044 Cygnal Debug Adapter product SILABS SB_PARAMOUNT_ME 0x8043 Software Bisque Paramount ME product SILABS SAEL 0x8053 SA-EL USB product SILABS GSM2228 0x8054 Enfora GSM2228 USB product SILABS ARGUSISP 0x8066 Argussoft ISP product SILABS IMS_USB_RS422 0x806f IMS USB-RS422 product SILABS CRUMB128 0x807a Crumb128 board product SILABS OPTRIS_MSPRO 0x80c4 Optris MSpro LT Thermometer product SILABS DEGREE 0x80ca Degree Controls Inc product SILABS TRACIENT 0x80dd Tracient RFID product SILABS TRAQMATE 0x80ed Track Systems Traqmate product SILABS SUUNTO 0x80f6 Suunto Sports Instrument product SILABS ARYGON_MIFARE 0x8115 Arygon Mifare RFID reader product SILABS BURNSIDE 0x813d Burnside Telecon Deskmobile product SILABS TAMSMASTER 0x813f Tams Master Easy Control product SILABS WMRBATT 0x814a WMR RIGblaster Plug&Play product SILABS WMRRIGBLASTER 0x814a WMR RIGblaster Plug&Play product SILABS WMRRIGTALK 0x814b WMR RIGtalk RT1 product SILABS B_G_H3000 0x8156 B&G H3000 Data Cable product SILABS HELICOM 0x815e Helicomm IP-Link 1220-DVM product SILABS HAMLINKUSB 0x815f Timewave HamLinkUSB product SILABS 
AVIT_USB_TTL 0x818b AVIT Research USB-TTL product SILABS MJS_TOSLINK 0x819f MJS USB-TOSLINK product SILABS WAVIT 0x81a6 ThinkOptics WavIt product SILABS MULTIPLEX_RC 0x81a9 Multiplex RC adapter product SILABS MSD_DASHHAWK 0x81ac MSD DashHawk product SILABS INSYS_MODEM 0x81ad INSYS Modem product SILABS LIPOWSKY_JTAG 0x81c8 Lipowsky Baby-JTAG product SILABS LIPOWSKY_LIN 0x81e2 Lipowsky Baby-LIN product SILABS AEROCOMM 0x81e7 Aerocomm Radio product SILABS ZEPHYR_BIO 0x81e8 Zephyr Bioharness product SILABS EMS_C1007 0x81f2 EMS C1007 HF RFID controller product SILABS LIPOWSKY_HARP 0x8218 Lipowsky HARP-1 product SILABS C2_EDGE_MODEM 0x822b Commander 2 EDGE(GSM) Modem product SILABS CYGNAL_GPS 0x826b Cygnal Fasttrax GPS product SILABS TELEGESIS_ETRX2 0x8293 Telegesis ETRX2USB product SILABS PROCYON_AVS 0x82f9 Procyon AVS product SILABS MC35PU 0x8341 MC35pu product SILABS CYGNAL 0x8382 Cygnal product SILABS AMBER_AMB2560 0x83a8 Amber Wireless AMB2560 product SILABS DEKTEK_DTAPLUS 0x83d8 DekTec DTA Plus VHF/UHF Booster product SILABS KYOCERA_GPS 0x8411 Kyocera GPS product SILABS IRZ_SG10 0x8418 IRZ SG-10 GSM/GPRS Modem product SILABS BEI_VCP 0x846e BEI USB Sensor (VCP) product SILABS BALLUFF_RFID 0x8477 Balluff RFID reader product SILABS AC_SERV_IBUS 0x85ea AC-Services IBUS Interface product SILABS AC_SERV_CIS 0x85eb AC-Services CIS-IBUS product SILABS V_PREON32 0x85f8 Virtenio Preon32 product SILABS AC_SERV_CAN 0x8664 AC-Services CAN Interface product SILABS AC_SERV_OBD 0x8665 AC-Services OBD Interface product SILABS MMB_ZIGBEE 0x88a4 MMB Networks ZigBee product SILABS INGENI_ZIGBEE 0x88a5 Planet Innovation Ingeni ZigBee product SILABS CP2102 0xea60 SILABS USB UART product SILABS CP210X_2 0xea61 CP210x Serial product SILABS CP210X_3 0xea70 CP210x Serial product SILABS CP210X_4 0xea80 CP210x Serial product SILABS INFINITY_MIC 0xea71 Infinity GPS-MIC-1 Radio Monophone product SILABS USBSCOPE50 0xf001 USBscope50 product SILABS USBWAVE12 0xf002 USBwave12 product SILABS USBPULSE100 0xf003 USBpulse100 product SILABS USBCOUNT50 0xf004 USBcount50 product SILABS2 DCU11CLONE 0xaa26 DCU-11 clone product SILABS3 GPRS_MODEM 0xea61 GPRS Modem product SILABS4 100EU_MODEM 0xea61 GPRS Modem 100EU /* Silicon Portals Inc. 
*/ product SILICONPORTALS YAPPH_NF 0x0200 YAP Phone (no firmware) product SILICONPORTALS YAPPHONE 0x0201 YAP Phone /* Sirius Technologies products */ product SIRIUS ROADSTER 0x0001 NetComm Roadster II 56 USB /* Sitecom products */ product SITECOM LN029 0x182d USB 2.0 Ethernet product SITECOM SERIAL 0x2068 USB to serial cable (v2) product SITECOM2 WL022 0x182d WL-022 /* Sitecom Europe products */ product SITECOMEU RT2870_1 0x0017 RT2870 product SITECOMEU WL168V1 0x000d WL-168 v1 product SITECOMEU LN030 0x0021 MCS7830 product SITECOMEU WL168V4 0x0028 WL-168 v4 product SITECOMEU RT2870_2 0x002b RT2870 product SITECOMEU RT2870_3 0x002c RT2870 product SITECOMEU RT2870_4 0x002d RT2870 product SITECOMEU RT2770 0x0039 RT2770 product SITECOMEU RT3070_2 0x003b RT3070 product SITECOMEU RT3070_3 0x003c RT3070 product SITECOMEU RT3070_4 0x003d RT3070 product SITECOMEU RT3070 0x003e RT3070 product SITECOMEU WL608 0x003f WL-608 product SITECOMEU RT3071 0x0040 RT3071 product SITECOMEU RT3072_1 0x0041 RT3072 product SITECOMEU RT3072_2 0x0042 RT3072 product SITECOMEU WL353 0x0045 WL-353 product SITECOMEU RT3072_3 0x0047 RT3072 product SITECOMEU RT3072_4 0x0048 RT3072 product SITECOMEU RT3072_5 0x004a RT3072 product SITECOMEU WL349V1 0x004b WL-349 v1 product SITECOMEU RT3072_6 0x004d RT3072 product SITECOMEU RTL8188CU_1 0x0052 RTL8188CU product SITECOMEU RTL8188CU_2 0x005c RTL8188CU product SITECOMEU RTL8192CU 0x0061 RTL8192CU product SITECOMEU LN032 0x0072 LN-032 product SITECOMEU LN031 0x0056 LN-031 product SITECOMEU LN028 0x061c LN-028 product SITECOMEU WL113 0x9071 WL-113 product SITECOMEU ZD1211B 0x9075 ZD1211B product SITECOMEU WL172 0x90ac WL-172 product SITECOMEU WL113R2 0x9712 WL-113 rev 2 /* Skanhex Technology products */ product SKANHEX MD_7425 0x410a MD 7425 Camera product SKANHEX SX_520Z 0x5200 SX 520z Camera /* Smart Technologies products */ product SMART PL2303 0x2303 Serial adapter /* SmartBridges products */ product SMARTBRIDGES SMARTLINK 0x0001 SmartLink USB Ethernet product SMARTBRIDGES SMARTNIC 0x0003 smartNIC 2 PnP Ethernet /* SMC products */ product SMC 2102USB 0x0100 10Mbps Ethernet product SMC 2202USB 0x0200 10/100 Ethernet product SMC 2206USB 0x0201 EZ Connect USB Ethernet product SMC 2862WG 0xee13 EZ Connect Wireless Adapter product SMC2 2020HUB 0x2020 USB Hub product SMC2 2514HUB 0x2514 USB Hub product SMC3 2662WUSB 0xa002 2662W-AR Wireless product SMC2 LAN9500_ETH 0x9500 USB/Ethernet product SMC2 LAN9505_ETH 0x9505 USB/Ethernet product SMC2 LAN9530_ETH 0x9530 USB/Ethernet product SMC2 LAN9730_ETH 0x9730 USB/Ethernet product SMC2 LAN9500_SAL10 0x9900 USB/Ethernet product SMC2 LAN9505_SAL10 0x9901 USB/Ethernet product SMC2 LAN9500A_SAL10 0x9902 USB/Ethernet product SMC2 LAN9505A_SAL10 0x9903 USB/Ethernet product SMC2 LAN9514_SAL10 0x9904 USB/Ethernet product SMC2 LAN9500A_HAL 0x9905 USB/Ethernet product SMC2 LAN9505A_HAL 0x9906 USB/Ethernet product SMC2 LAN9500_ETH_2 0x9907 USB/Ethernet product SMC2 LAN9500A_ETH_2 0x9908 USB/Ethernet product SMC2 LAN9514_ETH_2 0x9909 USB/Ethernet product SMC2 LAN9500A_ETH 0x9e00 USB/Ethernet product SMC2 LAN9505A_ETH 0x9e01 USB/Ethernet product SMC2 LAN89530_ETH 0x9e08 USB/Ethernet product SMC2 LAN9514_ETH 0xec00 USB/Ethernet /* SOHOware products */ product SOHOWARE NUB100 0x9100 10/100 USB Ethernet product SOHOWARE NUB110 0x9110 10/100 USB Ethernet /* SOLID YEAR products */ product SOLIDYEAR KEYBOARD 0x2101 Solid Year USB keyboard /* SONY products */ product SONY DSC 0x0010 DSC cameras product SONY MS_NW_MS7 0x0025 Memorystick NW-MS7 product SONY 
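/*
 * Editor's note -- not part of the original usbdevs file.  Numbered vendor
 * suffixes such as SMC/SMC2/SMC3 or SIEMENS2/SIEMENS3 above refer to
 * distinct USB vendor IDs that belong to the same company; the suffix keeps
 * the generated USB_VENDOR_* constants unique.  The free-form text at the
 * end of each entry serves only as a human-readable device description.
 */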
PORTABLE_HDD_V2 0x002b Portable USB Harddrive V2 product SONY MSACUS1 0x002d Memorystick MSAC-US1 product SONY HANDYCAM 0x002e Handycam product SONY MSC 0x0032 MSC memory stick slot product SONY CLIE_35 0x0038 Sony Clie v3.5 product SONY MS_PEG_N760C 0x0058 PEG N760c Memorystick product SONY CLIE_40 0x0066 Sony Clie v4.0 product SONY MS_MSC_U03 0x0069 Memorystick MSC-U03 product SONY CLIE_40_MS 0x006d Sony Clie v4.0 Memory Stick slot product SONY CLIE_S360 0x0095 Sony Clie s360 product SONY CLIE_41_MS 0x0099 Sony Clie v4.1 Memory Stick slot product SONY CLIE_41 0x009a Sony Clie v4.1 product SONY CLIE_NX60 0x00da Sony Clie nx60 product SONY CLIE_TH55 0x0144 Sony Clie th55 product SONY CLIE_TJ37 0x0169 Sony Clie tj37 product SONY RF_RECEIVER 0x01db Sony RF mouse/kbd Receiver VGP-WRC1 product SONY QN3 0x0437 Sony QN3 CMD-Jxx phone cable /* Sony Ericsson products */ product SONYERICSSON DCU10 0x0528 DCU-10 Phone Data Cable product SONYERICSSON DATAPILOT 0x2003 Datapilot Phone Cable /* SOURCENEXT products */ product SOURCENEXT KEIKAI8 0x039f KeikaiDenwa 8 product SOURCENEXT KEIKAI8_CHG 0x012e KeikaiDenwa 8 with charger /* SparkLAN products */ product SPARKLAN RT2573 0x0004 RT2573 product SPARKLAN RT2870_1 0x0006 RT2870 product SPARKLAN RT3070 0x0010 RT3070 /* Soundgraph products */ product SOUNDGRAPH IMON_VFD 0x0044 Antec Veris Elite VFD Panel, Knob, and Remote product SOUNDGRAPH SSTONE_LC16 0xffdc Silverstone LC16 VFD Panel, Knob, and Remote /* Speed Dragon Multimedia products */ product SPEEDDRAGON MS3303H 0x110b MS3303H Serial /* Sphairon Access Systems GmbH products */ product SPHAIRON UB801R 0x0110 UB801R /* Stelera Wireless products */ product STELERA ZEROCD 0x1000 Zerocd Installer product STELERA C105 0x1002 Stelera/Bandrish C105 USB product STELERA E1003 0x1003 3G modem product STELERA E1004 0x1004 3G modem product STELERA E1005 0x1005 3G modem product STELERA E1006 0x1006 3G modem product STELERA E1007 0x1007 3G modem product STELERA E1008 0x1008 3G modem product STELERA E1009 0x1009 3G modem product STELERA E100A 0x100a 3G modem product STELERA E100B 0x100b 3G modem product STELERA E100C 0x100c 3G modem product STELERA E100D 0x100d 3G modem product STELERA E100E 0x100e 3G modem product STELERA E100F 0x100f 3G modem product STELERA E1010 0x1010 3G modem product STELERA E1011 0x1011 3G modem product STELERA E1012 0x1012 3G modem /* STMicroelectronics products */ product STMICRO BIOCPU 0x2016 Biometric Coprocessor product STMICRO COMMUNICATOR 0x7554 USB Communicator product STMICRO ST72682 0xfada USB 2.0 Flash drive controller /* STSN products */ product STSN STSN0001 0x0001 Internet Access Device /* SUN Corporation products */ product SUNTAC DS96L 0x0003 SUNTAC U-Cable type D2 product SUNTAC PS64P1 0x0005 SUNTAC U-Cable type P1 product SUNTAC VS10U 0x0009 SUNTAC Slipper U product SUNTAC IS96U 0x000a SUNTAC Ir-Trinity product SUNTAC AS64LX 0x000b SUNTAC U-Cable type A3 product SUNTAC AS144L4 0x0011 SUNTAC U-Cable type A4 /* Sun Microsystems products */ product SUN KEYBOARD_TYPE_6 0x0005 Type 6 USB keyboard product SUN KEYBOARD_TYPE_7 0x00a2 Type 7 USB keyboard /* XXX The above is a North American PC style keyboard possibly */ product SUN MOUSE 0x0100 Type 6 USB mouse product SUN KBD_HUB 0x100e Kbd Hub /* Sunplus Innovation Technology Inc. 
products */ product SUNPLUS USBMOUSE 0x0007 USB Optical Mouse /* Super Top products */ product SUPERTOP IDE 0x6600 USB-IDE product SUPERTOP FLASHDRIVE 0x121c extrememory Snippy /* Syntech products */ product SYNTECH CPT8001C 0x0001 CPT-8001C Barcode scanner product SYNTECH CYPHERLAB100 0x1000 CipherLab USB Barcode Scanner /* Teclast products */ product TECLAST TLC300 0x3203 USB Media Player /* Testo products */ product TESTO USB_INTERFACE 0x0001 FTDI compatible adapter /* TexTech products */ product TEXTECH DUMMY 0x0000 Dummy product product TEXTECH U2M_1 0x0101 Textech USB MIDI cable product TEXTECH U2M_2 0x1806 Textech USB MIDI cable /* The Mobility Lab products */ product TML USB_SERIAL 0x0064 FTDI compatible adapter /* Thurlby Thandar Instrument products */ product TTI QL355P 0x03e8 FTDI compatible adapter /* Supra products */ product DIAMOND2 SUPRAEXPRESS56K 0x07da Supra Express 56K modem product DIAMOND2 SUPRA2890 0x0b4a SupraMax 2890 56K Modem product DIAMOND2 RIO600USB 0x5001 Rio 600 USB product DIAMOND2 RIO800USB 0x5002 Rio 800 USB /* Surecom Technology products */ product SURECOM EP9001G2A 0x11f2 EP-9001-G rev 2A product SURECOM RT2570 0x11f3 RT2570 product SURECOM RT2573 0x31f3 RT2573 /* Sweex products */ product SWEEX ZD1211 0x1809 ZD1211 product SWEEX2 LW153 0x0153 LW153 product SWEEX2 LW154 0x0154 LW154 product SWEEX2 LW303 0x0302 LW303 product SWEEX2 LW313 0x0313 LW313 /* System TALKS, Inc. */ product SYSTEMTALKS SGCX2UL 0x1920 SGC-X2UL /* Tapwave products */ product TAPWAVE ZODIAC 0x0100 Zodiac /* Taugagreining products */ product TAUGA CAMERAMATE 0x0005 CameraMate (DPCM_USB) /* TCTMobile products */ product TCTMOBILE X060S 0x0000 X060S 3G modem product TCTMOBILE X080S 0xf000 X080S 3G modem /* TDK products */ product TDK UPA9664 0x0115 USB-PDC Adapter UPA9664 product TDK UCA1464 0x0116 USB-cdmaOne Adapter UCA1464 product TDK UHA6400 0x0117 USB-PHS Adapter UHA6400 product TDK UPA6400 0x0118 USB-PHS Adapter UPA6400 product TDK BT_DONGLE 0x0309 Bluetooth USB dongle /* TEAC products */ product TEAC FD05PUB 0x0000 FD-05PUB floppy /* Tekram Technology products */ product TEKRAM QUICKWLAN 0x1630 QuickWLAN product TEKRAM ZD1211_1 0x5630 ZD1211 product TEKRAM ZD1211_2 0x6630 ZD1211 /* Telex Communications products */ product TELEX MIC1 0x0001 Enhanced USB Microphone /* Telit products */ product TELIT UC864E 0x1003 UC864E 3G modem product TELIT UC864G 0x1004 UC864G 3G modem /* Ten X Technology, Inc. 
*/ product TENX UAUDIO0 0xf211 USB audio headset /* Texas Intel products */ product TI UTUSB41 0x1446 UT-USB41 hub product TI TUSB2046 0x2046 TUSB2046 hub /* Thrustmaster products */ product THRUST FUSION_PAD 0xa0a3 Fusion Digital Gamepad /* TLayTech products */ product TLAYTECH TEU800 0x1682 TEU800 3G modem /* Topre Corporation products */ product TOPRE HHKB 0x0100 HHKB Professional /* Toshiba Corporation products */ product TOSHIBA POCKETPC_E740 0x0706 PocketPC e740 product TOSHIBA RT3070 0x0a07 RT3070 product TOSHIBA G450 0x0d45 G450 modem product TOSHIBA HSDPA 0x1302 G450 modem product TOSHIBA TRANSMEMORY 0x6545 USB ThumbDrive /* Trek Technology products */ product TREK THUMBDRIVE 0x1111 ThumbDrive product TREK MEMKEY 0x8888 IBM USB Memory Key product TREK THUMBDRIVE_8MB 0x9988 ThumbDrive_8MB /* TRENDnet products */ product TRENDNET RTL8192CU 0x624d RTL8192CU product TRENDNET TEW646UBH 0x646b TEW-646UBH product TRENDNET RTL8188CU 0x648b RTL8188CU /* Tripp-Lite products */ product TRIPPLITE U209 0x2008 Serial /* Trumpion products */ product TRUMPION T33520 0x1001 T33520 USB Flash Card Controller product TRUMPION C3310 0x1100 Comotron C3310 MP3 player product TRUMPION MP3 0x1200 MP3 player /* TwinMOS */ product TWINMOS G240 0xa006 G240 product TWINMOS MDIV 0x1325 Memory Disk IV /* Ubiquam products */ product UBIQUAM UALL 0x3100 CDMA 1xRTT USB Modem (U-100/105/200/300/520) /* Ultima products */ product ULTIMA 1200UBPLUS 0x4002 1200 UB Plus scanner /* UMAX products */ product UMAX ASTRA1236U 0x0002 Astra 1236U Scanner product UMAX ASTRA1220U 0x0010 Astra 1220U Scanner product UMAX ASTRA2000U 0x0030 Astra 2000U Scanner product UMAX ASTRA2100U 0x0130 Astra 2100U Scanner product UMAX ASTRA2200U 0x0230 Astra 2200U Scanner product UMAX ASTRA3400 0x0060 Astra 3400 Scanner /* U-MEDIA Communications products */ product UMEDIA TEW444UBEU 0x3006 TEW-444UB EU product UMEDIA TEW444UBEU_NF 0x3007 TEW-444UB EU (no firmware) product UMEDIA TEW429UB_A 0x300a TEW-429UB_A product UMEDIA TEW429UB 0x300b TEW-429UB product UMEDIA TEW429UBC1 0x300d TEW-429UB C1 product UMEDIA RT2870_1 0x300e RT2870 product UMEDIA ALL0298V2 0x3204 ALL0298 v2 product UMEDIA AR5523_2 0x3205 AR5523 product UMEDIA AR5523_2_NF 0x3206 AR5523 (no firmware) /* Universal Access products */ product UNIACCESS PANACHE 0x0101 Panache Surf USB ISDN Adapter /* Unknown products */ product UNKNOWN4 NF_RIC 0x0001 FTDI compatible adapter /* USI products */ product USI MC60 0x10c5 MC60 Serial /* U.S. 
Robotics products */ product USR USR5422 0x0118 USR5422 WLAN product USR USR5423 0x0121 USR5423 WLAN /* VIA Technologies products */ product VIA USB2IDEBRIDGE 0x6204 USB 2.0 IDE Bridge /* VIA Labs */ product VIALABS USB30SATABRIDGE 0x0700 USB 3.0 SATA Bridge /* Vaisala products */ product VAISALA CABLE 0x0200 USB Interface cable /* Vertex products */ product VERTEX VW110L 0x0100 Vertex VW110L modem /* VidzMedia products */ product VIDZMEDIA MONSTERTV 0x4fb1 MonsterTV P2H /* Vision products */ product VISION VC6452V002 0x0002 CPiA Camera /* Visioneer products */ product VISIONEER 7600 0x0211 OneTouch 7600 product VISIONEER 5300 0x0221 OneTouch 5300 product VISIONEER 3000 0x0224 Scanport 3000 product VISIONEER 6100 0x0231 OneTouch 6100 product VISIONEER 6200 0x0311 OneTouch 6200 product VISIONEER 8100 0x0321 OneTouch 8100 product VISIONEER 8600 0x0331 OneTouch 8600 /* Vivitar products */ product VIVITAR 35XX 0x0003 Vivicam 35Xx /* VTech products */ product VTECH RT2570 0x3012 RT2570 product VTECH ZD1211B 0x3014 ZD1211B /* Wacom products */ product WACOM CT0405U 0x0000 CT-0405-U Tablet product WACOM GRAPHIRE 0x0010 Graphire product WACOM GRAPHIRE3_4X5 0x0013 Graphire 3 4x5 product WACOM INTUOSA5 0x0021 Intuos A5 product WACOM GD0912U 0x0022 Intuos 9x12 Graphics Tablet /* WAGO Kontakttechnik GmbH products */ product WAGO SERVICECABLE 0x07a6 USB Service Cable 750-923 /* WaveSense products */ product WAVESENSE JAZZ 0xaaaa Jazz blood glucose meter /* WCH products */ product WCH CH341SER 0x5523 CH341/CH340 USB-Serial Bridge product WCH2 DUMMY 0x0000 Dummy product product WCH2 CH341SER_2 0x5523 CH341/CH340 USB-Serial Bridge product WCH2 CH341SER 0x7523 CH341/CH340 USB-Serial Bridge product WCH2 U2M 0X752d CH345 USB2.0-MIDI /* West Mountain Radio products */ product WESTMOUNTAIN RIGBLASTER_ADVANTAGE 0x0003 RIGblaster Advantage /* Western Digital products */ product WESTERN COMBO 0x0200 Firewire USB Combo product WESTERN EXTHDD 0x0400 External HDD product WESTERN HUB 0x0500 USB HUB product WESTERN MYBOOK 0x0901 MyBook External HDD product WESTERN MYPASSPORT_00 0x0704 MyPassport External HDD product WESTERN MYPASSPORT_11 0x0741 MyPassport External HDD product WESTERN MYPASSPORT_01 0x0746 MyPassport External HDD product WESTERN MYPASSPORT_02 0x0748 MyPassport External HDD product WESTERN MYPASSPORT_03 0x074A MyPassport External HDD product WESTERN MYPASSPORT_04 0x074C MyPassport External HDD product WESTERN MYPASSPORT_05 0x074E MyPassport External HDD product WESTERN MYPASSPORT_06 0x07A6 MyPassport External HDD product WESTERN MYPASSPORT_07 0x07A8 MyPassport External HDD product WESTERN MYPASSPORT_08 0x07AA MyPassport External HDD product WESTERN MYPASSPORT_09 0x07AC MyPassport External HDD product WESTERN MYPASSPORT_10 0x07AE MyPassport External HDD product WESTERN MYPASSPORTES_00 0x070A MyPassport Essential External HDD product WESTERN MYPASSPORTES_01 0x071A MyPassport Essential External HDD product WESTERN MYPASSPORTES_02 0x0730 MyPassport Essential External HDD product WESTERN MYPASSPORTES_03 0x0732 MyPassport Essential External HDD product WESTERN MYPASSPORTES_04 0x0740 MyPassport Essential External HDD product WESTERN MYPASSPORTES_05 0x0742 MyPassport Essential External HDD product WESTERN MYPASSPORTES_06 0x0750 MyPassport Essential External HDD product WESTERN MYPASSPORTES_07 0x0752 MyPassport Essential External HDD product WESTERN MYPASSPORTES_08 0x07A0 MyPassport Essential External HDD product WESTERN MYPASSPORTES_09 0x07A2 MyPassport Essential External HDD /* WeTelecom products */ product 
WETELECOM WM_D200 0x6801 WM-D200 /* WIENER Plein & Baus GmbH products */ product WIENERPLEINBAUS PL512 0x0010 PL512 PSU product WIENERPLEINBAUS RCM 0x0011 RCM Remote Control product WIENERPLEINBAUS MPOD 0x0012 MPOD PSU product WIENERPLEINBAUS CML 0x0015 CML Data Logger /* Windbond Electronics */ product WINBOND UH104 0x5518 4-port USB Hub /* WinMaxGroup products */ product WINMAXGROUP FLASH64MC 0x6660 USB Flash Disk 64M-C /* Wistron NeWeb products */ product WISTRONNEWEB WNC0600 0x0326 WNC-0600USB product WISTRONNEWEB UR045G 0x0427 PrismGT USB 2.0 WLAN product WISTRONNEWEB UR055G 0x0711 UR055G product WISTRONNEWEB O8494 0x0804 ORiNOCO 802.11n product WISTRONNEWEB AR5523_1 0x0826 AR5523 product WISTRONNEWEB AR5523_1_NF 0x0827 AR5523 (no firmware) product WISTRONNEWEB AR5523_2 0x082a AR5523 product WISTRONNEWEB AR5523_2_NF 0x0829 AR5523 (no firmware) /* Xerox products */ product XEROX WCM15 0xffef WorkCenter M15 /* Xirlink products */ product XIRLINK PCCAM 0x8080 IBM PC Camera /* Xyratex products */ product XYRATEX PRISM_GT_1 0x2000 PrismGT USB 2.0 WLAN product XYRATEX PRISM_GT_2 0x2002 PrismGT USB 2.0 WLAN /* Yamaha products */ product YAMAHA UX256 0x1000 UX256 MIDI I/F product YAMAHA UX96 0x1008 UX96 MIDI I/F product YAMAHA RPU200 0x3104 RP-U200 product YAMAHA RTA54I 0x4000 NetVolante RTA54i Broadband&ISDN Router product YAMAHA RTW65B 0x4001 NetVolante RTW65b Broadband Wireless Router product YAMAHA RTW65I 0x4002 NetVolante RTW65i Broadband&ISDN Wireless Router product YAMAHA RTA55I 0x4004 NetVolante RTA55i Broadband VoIP Router /* Yano products */ product YANO U640MO 0x0101 U640MO-03 product YANO FW800HD 0x05fc METALWEAR-HDD /* Y.C. Cable products */ product YCCABLE PL2303 0x0fba PL2303 Serial /* Y-E Data products */ product YEDATA FLASHBUSTERU 0x0000 Flashbuster-U /* Yiso Wireless Co. products */ product YISO C893 0xc893 CDMA 2000 1xEVDO PC Card /* Z-Com products */ product ZCOM M4Y750 0x0001 M4Y-750 product ZCOM XI725 0x0002 XI-725/726 product ZCOM XI735 0x0005 XI-735 product ZCOM XG703A 0x0008 PrismGT USB 2.0 WLAN product ZCOM ZD1211 0x0011 ZD1211 product ZCOM AR5523 0x0012 AR5523 product ZCOM AR5523_NF 0x0013 AR5523 driver (no firmware) product ZCOM XM142 0x0015 XM-142 product ZCOM ZD1211B 0x001a ZD1211B product ZCOM RT2870_1 0x0022 RT2870 product ZCOM UB81 0x0023 UB81 product ZCOM RT2870_2 0x0025 RT2870 product ZCOM UB82 0x0026 UB82 /* Zinwell products */ product ZINWELL RT2570 0x0260 RT2570 product ZINWELL RT2870_1 0x0280 RT2870 product ZINWELL RT2870_2 0x0282 RT2870 product ZINWELL RT3072_1 0x0283 RT3072 product ZINWELL RT3072_2 0x0284 RT3072 product ZINWELL RT3070 0x5257 RT3070 /* Zoom Telephonics, Inc. products */ product ZOOM 2986L 0x9700 2986L Fax modem /* Zoran Microelectronics products */ product ZORAN EX20DSC 0x4343 Digital Camera EX-20 DSC /* Zydas Technology Corporation products */ product ZYDAS ZD1211 0x1211 ZD1211 WLAN abg product ZYDAS ZD1211B 0x1215 ZD1211B product ZYDAS ZD1221 0x1221 ZD1221 /* ZyXEL Communication Co. 
products */ product ZYXEL OMNI56K 0x1500 Omni 56K Plus product ZYXEL 980N 0x2011 Scorpion-980N keyboard product ZYXEL ZYAIRG220 0x3401 ZyAIR G-220 product ZYXEL G200V2 0x3407 G-200 v2 product ZYXEL AG225H 0x3409 AG-225H product ZYXEL M202 0x340a M-202 product ZYXEL G220V2 0x340f G-220 v2 product ZYXEL G202 0x3410 G-202 product ZYXEL RT2870_1 0x3416 RT2870 product ZYXEL NWD271N 0x3417 NWD-271N product ZYXEL NWD211AN 0x3418 NWD-211AN product ZYXEL RT2870_2 0x341a RT2870 product ZYXEL RT3070 0x341e NWD2105 product ZYXEL RTL8192CU 0x341f RTL8192CU product ZYXEL NWD2705 0x3421 NWD2705 Index: projects/release-pkg/sys/dev/usb/wlan/if_urtwn.c =================================================================== --- projects/release-pkg/sys/dev/usb/wlan/if_urtwn.c (revision 295925) +++ projects/release-pkg/sys/dev/usb/wlan/if_urtwn.c (revision 295926) @@ -1,5281 +1,5282 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2015 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for Realtek RTL8188CE-VAU/RTL8188CUS/RTL8188EU/RTL8188RU/RTL8192CU. */ #include "opt_wlan.h" #include "opt_urtwn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #include #ifdef USB_DEBUG enum { URTWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ URTWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ URTWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ URTWN_DEBUG_RA = 0x00000008, /* f/w rate adaptation setup */ URTWN_DEBUG_USB = 0x00000010, /* usb requests */ URTWN_DEBUG_FIRMWARE = 0x00000020, /* firmware(9) loading debug */ URTWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ URTWN_DEBUG_INTR = 0x00000080, /* ISR */ URTWN_DEBUG_TEMP = 0x00000100, /* temperature calibration */ URTWN_DEBUG_ROM = 0x00000200, /* various ROM info */ URTWN_DEBUG_KEY = 0x00000400, /* crypto keys management */ URTWN_DEBUG_TXPWR = 0x00000800, /* dump Tx power values */ URTWN_DEBUG_ANY = 0xffffffff }; #define URTWN_DPRINTF(_sc, _m, ...) do { \ if ((_sc)->sc_debug & (_m)) \ device_printf((_sc)->sc_dev, __VA_ARGS__); \ } while(0) #else #define URTWN_DPRINTF(_sc, _m, ...) 
do { (void) sc; } while (0) #endif #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh) /* various supported device vendors/products */ static const STRUCT_USB_HOST_ID urtwn_devs[] = { #define URTWN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } #define URTWN_RTL8188E_DEV(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTWN_RTL8188E) } #define URTWN_RTL8188E 1 URTWN_DEV(ABOCOM, RTL8188CU_1), URTWN_DEV(ABOCOM, RTL8188CU_2), URTWN_DEV(ABOCOM, RTL8192CU), URTWN_DEV(ASUS, RTL8192CU), URTWN_DEV(ASUS, USBN10NANO), URTWN_DEV(AZUREWAVE, RTL8188CE_1), URTWN_DEV(AZUREWAVE, RTL8188CE_2), URTWN_DEV(AZUREWAVE, RTL8188CU), URTWN_DEV(BELKIN, F7D2102), URTWN_DEV(BELKIN, RTL8188CU), URTWN_DEV(BELKIN, RTL8192CU), URTWN_DEV(CHICONY, RTL8188CUS_1), URTWN_DEV(CHICONY, RTL8188CUS_2), URTWN_DEV(CHICONY, RTL8188CUS_3), URTWN_DEV(CHICONY, RTL8188CUS_4), URTWN_DEV(CHICONY, RTL8188CUS_5), URTWN_DEV(COREGA, RTL8192CU), URTWN_DEV(DLINK, RTL8188CU), URTWN_DEV(DLINK, RTL8192CU_1), URTWN_DEV(DLINK, RTL8192CU_2), URTWN_DEV(DLINK, RTL8192CU_3), URTWN_DEV(DLINK, DWA131B), URTWN_DEV(EDIMAX, EW7811UN), URTWN_DEV(EDIMAX, RTL8192CU), URTWN_DEV(FEIXUN, RTL8188CU), URTWN_DEV(FEIXUN, RTL8192CU), URTWN_DEV(GUILLEMOT, HWNUP150), URTWN_DEV(HAWKING, RTL8192CU), URTWN_DEV(HP3, RTL8188CU), URTWN_DEV(NETGEAR, WNA1000M), URTWN_DEV(NETGEAR, RTL8192CU), URTWN_DEV(NETGEAR4, RTL8188CU), URTWN_DEV(NOVATECH, RTL8188CU), URTWN_DEV(PLANEX2, RTL8188CU_1), URTWN_DEV(PLANEX2, RTL8188CU_2), URTWN_DEV(PLANEX2, RTL8188CU_3), URTWN_DEV(PLANEX2, RTL8188CU_4), URTWN_DEV(PLANEX2, RTL8188CUS), URTWN_DEV(PLANEX2, RTL8192CU), URTWN_DEV(REALTEK, RTL8188CE_0), URTWN_DEV(REALTEK, RTL8188CE_1), URTWN_DEV(REALTEK, RTL8188CTV), URTWN_DEV(REALTEK, RTL8188CU_0), URTWN_DEV(REALTEK, RTL8188CU_1), URTWN_DEV(REALTEK, RTL8188CU_2), URTWN_DEV(REALTEK, RTL8188CU_3), URTWN_DEV(REALTEK, RTL8188CU_COMBO), URTWN_DEV(REALTEK, RTL8188CUS), URTWN_DEV(REALTEK, RTL8188RU_1), URTWN_DEV(REALTEK, RTL8188RU_2), URTWN_DEV(REALTEK, RTL8188RU_3), URTWN_DEV(REALTEK, RTL8191CU), URTWN_DEV(REALTEK, RTL8192CE), URTWN_DEV(REALTEK, RTL8192CU), URTWN_DEV(SITECOMEU, RTL8188CU_1), URTWN_DEV(SITECOMEU, RTL8188CU_2), URTWN_DEV(SITECOMEU, RTL8192CU), URTWN_DEV(TRENDNET, RTL8188CU), URTWN_DEV(TRENDNET, RTL8192CU), URTWN_DEV(ZYXEL, RTL8192CU), /* URTWN_RTL8188E */ + URTWN_RTL8188E_DEV(ABOCOM, RTL8188EU), URTWN_RTL8188E_DEV(DLINK, DWA123D1), URTWN_RTL8188E_DEV(DLINK, DWA125D1), URTWN_RTL8188E_DEV(ELECOM, WDC150SU2M), URTWN_RTL8188E_DEV(REALTEK, RTL8188ETV), URTWN_RTL8188E_DEV(REALTEK, RTL8188EU), #undef URTWN_RTL8188E_DEV #undef URTWN_DEV }; static device_probe_t urtwn_match; static device_attach_t urtwn_attach; static device_detach_t urtwn_detach; static usb_callback_t urtwn_bulk_tx_callback; static usb_callback_t urtwn_bulk_rx_callback; static void urtwn_sysctlattach(struct urtwn_softc *); static void urtwn_drain_mbufq(struct urtwn_softc *); static usb_error_t urtwn_do_request(struct urtwn_softc *, struct usb_device_request *, void *); static struct ieee80211vap *urtwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void urtwn_vap_delete(struct ieee80211vap *); static struct mbuf * urtwn_rx_copy_to_mbuf(struct urtwn_softc *, struct r92c_rx_stat *, int); static struct mbuf * urtwn_report_intr(struct usb_xfer *, struct urtwn_data *); static struct mbuf * urtwn_rxeof(struct urtwn_softc *, uint8_t *, int); static void urtwn_r88e_ratectl_tx_complete(struct 
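/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * The two table macros above are thin wrappers around the usbdevs
 * constants.  For example,
 *
 *     URTWN_DEV(REALTEK, RTL8192CU)
 *
 * expands to
 *
 *     { USB_VP(USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8192CU) }
 *
 * while URTWN_RTL8188E_DEV() uses USB_VPI() to additionally store the
 * driver_info value URTWN_RTL8188E in the entry.  urtwn_attach() reads that
 * value back with USB_GET_DRIVER_INFO(uaa) and sets the URTWN_CHIP_88E
 * flag, which is how the RTL8188E-specific code paths are selected.
 */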
urtwn_softc *, void *); static struct ieee80211_node *urtwn_rx_frame(struct urtwn_softc *, struct mbuf *, int8_t *); static void urtwn_txeof(struct urtwn_softc *, struct urtwn_data *, int); static int urtwn_alloc_list(struct urtwn_softc *, struct urtwn_data[], int, int); static int urtwn_alloc_rx_list(struct urtwn_softc *); static int urtwn_alloc_tx_list(struct urtwn_softc *); static void urtwn_free_list(struct urtwn_softc *, struct urtwn_data data[], int); static void urtwn_free_rx_list(struct urtwn_softc *); static void urtwn_free_tx_list(struct urtwn_softc *); static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *); static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *); static usb_error_t urtwn_write_region_1(struct urtwn_softc *, uint16_t, uint8_t *, int); static usb_error_t urtwn_write_1(struct urtwn_softc *, uint16_t, uint8_t); static usb_error_t urtwn_write_2(struct urtwn_softc *, uint16_t, uint16_t); static usb_error_t urtwn_write_4(struct urtwn_softc *, uint16_t, uint32_t); static usb_error_t urtwn_read_region_1(struct urtwn_softc *, uint16_t, uint8_t *, int); static uint8_t urtwn_read_1(struct urtwn_softc *, uint16_t); static uint16_t urtwn_read_2(struct urtwn_softc *, uint16_t); static uint32_t urtwn_read_4(struct urtwn_softc *, uint16_t); static int urtwn_fw_cmd(struct urtwn_softc *, uint8_t, const void *, int); static void urtwn_cmdq_cb(void *, int); static int urtwn_cmd_sleepable(struct urtwn_softc *, const void *, size_t, CMD_FUNC_PROTO); static void urtwn_r92c_rf_write(struct urtwn_softc *, int, uint8_t, uint32_t); static void urtwn_r88e_rf_write(struct urtwn_softc *, int, uint8_t, uint32_t); static uint32_t urtwn_rf_read(struct urtwn_softc *, int, uint8_t); static int urtwn_llt_write(struct urtwn_softc *, uint32_t, uint32_t); static int urtwn_efuse_read_next(struct urtwn_softc *, uint8_t *); static int urtwn_efuse_read_data(struct urtwn_softc *, uint8_t *, uint8_t, uint8_t); #ifdef USB_DEBUG static void urtwn_dump_rom_contents(struct urtwn_softc *, uint8_t *, uint16_t); #endif static int urtwn_efuse_read(struct urtwn_softc *, uint8_t *, uint16_t); static int urtwn_efuse_switch_power(struct urtwn_softc *); static int urtwn_read_chipid(struct urtwn_softc *); static int urtwn_read_rom(struct urtwn_softc *); static int urtwn_r88e_read_rom(struct urtwn_softc *); static int urtwn_ra_init(struct urtwn_softc *); static void urtwn_init_beacon(struct urtwn_softc *, struct urtwn_vap *); static int urtwn_setup_beacon(struct urtwn_softc *, struct ieee80211_node *); static void urtwn_update_beacon(struct ieee80211vap *, int); static int urtwn_tx_beacon(struct urtwn_softc *sc, struct urtwn_vap *); static int urtwn_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static void urtwn_key_set_cb(struct urtwn_softc *, union sec_param *); static void urtwn_key_del_cb(struct urtwn_softc *, union sec_param *); static int urtwn_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int urtwn_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static void urtwn_tsf_task_adhoc(void *, int); static void urtwn_tsf_sync_enable(struct urtwn_softc *, struct ieee80211vap *); static void urtwn_get_tsf(struct urtwn_softc *, uint64_t *); static void urtwn_set_led(struct urtwn_softc *, int, int); static void urtwn_set_mode(struct urtwn_softc *, uint8_t); static void urtwn_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); static int urtwn_newstate(struct ieee80211vap 
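/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * The register accessors declared here (urtwn_read_1/2/4 and
 * urtwn_write_1/2/4) all funnel through urtwn_read_region_1() and
 * urtwn_write_region_1() further down, i.e. a single vendor-specific USB
 * control request (bRequest R92C_REQ_REGS) with the register address
 * carried in wValue.  A 16-bit write, for example, is just
 *
 *     val = htole16(val);
 *     urtwn_write_region_1(sc, addr, (uint8_t *)&val, sizeof(val));
 *
 * so multi-byte registers always travel little-endian over the wire.
 */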
*, enum ieee80211_state, int); static void urtwn_calib_to(void *); static void urtwn_calib_cb(struct urtwn_softc *, union sec_param *); static void urtwn_watchdog(void *); static void urtwn_update_avgrssi(struct urtwn_softc *, int, int8_t); static int8_t urtwn_get_rssi(struct urtwn_softc *, int, void *); static int8_t urtwn_r88e_get_rssi(struct urtwn_softc *, int, void *); static int urtwn_tx_data(struct urtwn_softc *, struct ieee80211_node *, struct mbuf *, struct urtwn_data *); static int urtwn_tx_raw(struct urtwn_softc *, struct ieee80211_node *, struct mbuf *, struct urtwn_data *, const struct ieee80211_bpf_params *); static void urtwn_tx_start(struct urtwn_softc *, struct mbuf *, uint8_t, struct urtwn_data *); static int urtwn_transmit(struct ieee80211com *, struct mbuf *); static void urtwn_start(struct urtwn_softc *); static void urtwn_parent(struct ieee80211com *); static int urtwn_r92c_power_on(struct urtwn_softc *); static int urtwn_r88e_power_on(struct urtwn_softc *); static void urtwn_r92c_power_off(struct urtwn_softc *); static void urtwn_r88e_power_off(struct urtwn_softc *); static int urtwn_llt_init(struct urtwn_softc *); #ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *); static void urtwn_r88e_fw_reset(struct urtwn_softc *); static int urtwn_fw_loadpage(struct urtwn_softc *, int, const uint8_t *, int); static int urtwn_load_firmware(struct urtwn_softc *); #endif static int urtwn_dma_init(struct urtwn_softc *); static int urtwn_mac_init(struct urtwn_softc *); static void urtwn_bb_init(struct urtwn_softc *); static void urtwn_rf_init(struct urtwn_softc *); static void urtwn_cam_init(struct urtwn_softc *); static int urtwn_cam_write(struct urtwn_softc *, uint32_t, uint32_t); static void urtwn_pa_bias_init(struct urtwn_softc *); static void urtwn_rxfilter_init(struct urtwn_softc *); static void urtwn_edca_init(struct urtwn_softc *); static void urtwn_write_txpower(struct urtwn_softc *, int, uint16_t[]); static void urtwn_get_txpower(struct urtwn_softc *, int, struct ieee80211_channel *, struct ieee80211_channel *, uint16_t[]); static void urtwn_r88e_get_txpower(struct urtwn_softc *, int, struct ieee80211_channel *, struct ieee80211_channel *, uint16_t[]); static void urtwn_set_txpower(struct urtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); static void urtwn_set_rx_bssid_all(struct urtwn_softc *, int); static void urtwn_set_gain(struct urtwn_softc *, uint8_t); static void urtwn_scan_start(struct ieee80211com *); static void urtwn_scan_end(struct ieee80211com *); static void urtwn_set_channel(struct ieee80211com *); static int urtwn_wme_update(struct ieee80211com *); static void urtwn_update_slot(struct ieee80211com *); static void urtwn_update_slot_cb(struct urtwn_softc *, union sec_param *); static void urtwn_update_aifs(struct urtwn_softc *, uint8_t); static void urtwn_set_promisc(struct urtwn_softc *); static void urtwn_update_promisc(struct ieee80211com *); static void urtwn_update_mcast(struct ieee80211com *); static struct ieee80211_node *urtwn_r88e_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static void urtwn_r88e_newassoc(struct ieee80211_node *, int); static void urtwn_r88e_node_free(struct ieee80211_node *); static void urtwn_set_chan(struct urtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); static void urtwn_iq_calib(struct urtwn_softc *); static void urtwn_lc_calib(struct urtwn_softc *); static void urtwn_temp_calib(struct urtwn_softc *); static int urtwn_init(struct 
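/*
 * Editor's note -- observation added for clarity, not part of the original
 * driver.  Where the RTL8192C/8188C and RTL8188E parts need different
 * handling, the prototypes above come in urtwn_*() / urtwn_r88e_*() (or
 * urtwn_r92c_*()) pairs: power_on, fw_reset, rf_write, read_rom,
 * get_txpower, get_rssi.  The variant is chosen at run time from the
 * URTWN_CHIP_88E flag, e.g.
 *
 *     if (sc->chip & URTWN_CHIP_88E)
 *         error = urtwn_r88e_read_rom(sc);
 *     else
 *         error = urtwn_read_rom(sc);
 */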
urtwn_softc *); static void urtwn_stop(struct urtwn_softc *); static void urtwn_abort_xfers(struct urtwn_softc *); static int urtwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void urtwn_ms_delay(struct urtwn_softc *); /* Aliases. */ #define urtwn_bb_write urtwn_write_4 #define urtwn_bb_read urtwn_read_4 static const struct usb_config urtwn_config[URTWN_N_TRANSFER] = { [URTWN_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = URTWN_RXBUFSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = urtwn_bulk_rx_callback, }, [URTWN_BULK_TX_BE] = { .type = UE_BULK, .endpoint = 0x03, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_BK] = { .type = UE_BULK, .endpoint = 0x03, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1, }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_VI] = { .type = UE_BULK, .endpoint = 0x02, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_VO] = { .type = UE_BULK, .endpoint = 0x02, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, }; static const struct wme_to_queue { uint16_t reg; uint8_t qid; } wme2queue[WME_NUM_AC] = { { R92C_EDCA_BE_PARAM, URTWN_BULK_TX_BE}, { R92C_EDCA_BK_PARAM, URTWN_BULK_TX_BK}, { R92C_EDCA_VI_PARAM, URTWN_BULK_TX_VI}, { R92C_EDCA_VO_PARAM, URTWN_BULK_TX_VO} }; static int urtwn_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != URTWN_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != URTWN_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(urtwn_devs, sizeof(urtwn_devs), uaa)); } static int urtwn_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct urtwn_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; int error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; if (USB_GET_DRIVER_INFO(uaa) == URTWN_RTL8188E) sc->chip |= URTWN_CHIP_88E; #ifdef USB_DEBUG int debug; if (resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "debug", &debug) == 0) sc->sc_debug = debug; #endif mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); URTWN_CMDQ_LOCK_INIT(sc); URTWN_NT_LOCK_INIT(sc); callout_init(&sc->sc_calib_to, 0); callout_init(&sc->sc_watchdog_ch, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_iface_index = URTWN_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &sc->sc_iface_index, sc->sc_xfer, urtwn_config, URTWN_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } URTWN_LOCK(sc); error = urtwn_read_chipid(sc); if (error) { device_printf(sc->sc_dev, "unsupported test chip\n"); URTWN_UNLOCK(sc); goto detach; } /* Determine number of Tx/Rx chains. 
*/ if (sc->chip & URTWN_CHIP_92C) { sc->ntxchains = (sc->chip & URTWN_CHIP_92C_1T2R) ? 1 : 2; sc->nrxchains = 2; } else { sc->ntxchains = 1; sc->nrxchains = 1; } if (sc->chip & URTWN_CHIP_88E) error = urtwn_r88e_read_rom(sc); else error = urtwn_read_rom(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot read rom, error %d\n", __func__, error); URTWN_UNLOCK(sc); goto detach; } device_printf(sc->sc_dev, "MAC/BB RTL%s, RF 6052 %dT%dR\n", (sc->chip & URTWN_CHIP_92C) ? "8192CU" : (sc->chip & URTWN_CHIP_88E) ? "8188EU" : (sc->board_type == R92C_BOARD_TYPE_HIGHPA) ? "8188RU" : (sc->board_type == R92C_BOARD_TYPE_MINICARD) ? "8188CE-VAU" : "8188CUS", sc->ntxchains, sc->nrxchains); URTWN_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_IBSS /* adhoc mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ ; ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_AES_CCM; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); ic->ic_raw_xmit = urtwn_raw_xmit; ic->ic_scan_start = urtwn_scan_start; ic->ic_scan_end = urtwn_scan_end; ic->ic_set_channel = urtwn_set_channel; ic->ic_transmit = urtwn_transmit; ic->ic_parent = urtwn_parent; ic->ic_vap_create = urtwn_vap_create; ic->ic_vap_delete = urtwn_vap_delete; ic->ic_wme.wme_update = urtwn_wme_update; ic->ic_updateslot = urtwn_update_slot; ic->ic_update_promisc = urtwn_update_promisc; ic->ic_update_mcast = urtwn_update_mcast; if (sc->chip & URTWN_CHIP_88E) { ic->ic_node_alloc = urtwn_r88e_node_alloc; ic->ic_newassoc = urtwn_r88e_newassoc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = urtwn_r88e_node_free; } ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), URTWN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), URTWN_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, urtwn_cmdq_cb, sc); urtwn_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); return (0); detach: urtwn_detach(self); return (ENXIO); /* failure */ } static void urtwn_sysctlattach(struct urtwn_softc *sc) { #ifdef USB_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_U32(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, "control debugging printfs"); #endif } static int urtwn_detach(device_t self) { struct urtwn_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; unsigned int x; /* Prevent further ioctls. */ URTWN_LOCK(sc); sc->sc_flags |= URTWN_DETACHED; URTWN_UNLOCK(sc); urtwn_stop(sc); callout_drain(&sc->sc_watchdog_ch); callout_drain(&sc->sc_calib_to); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, URTWN_N_TRANSFER); /* Prevent further allocations from RX/TX data lists. 
*/ URTWN_LOCK(sc); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); URTWN_UNLOCK(sc); /* drain USB transfers */ for (x = 0; x != URTWN_N_TRANSFER; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* Free data buffers. */ URTWN_LOCK(sc); urtwn_free_tx_list(sc); urtwn_free_rx_list(sc); URTWN_UNLOCK(sc); if (ic->ic_softc == sc) { ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_ifdetach(ic); } URTWN_NT_LOCK_DESTROY(sc); URTWN_CMDQ_LOCK_DESTROY(sc); mtx_destroy(&sc->sc_mtx); return (0); } static void urtwn_drain_mbufq(struct urtwn_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; URTWN_ASSERT_LOCKED(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; ieee80211_free_node(ni); m_freem(m); } } static usb_error_t urtwn_do_request(struct urtwn_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; URTWN_ASSERT_LOCKED(sc); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; URTWN_DPRINTF(sc, URTWN_DEBUG_USB, "%s: control request failed, %s (retries left: %d)\n", __func__, usbd_errstr(err), ntries); usb_pause_mtx(&sc->sc_mtx, hz / 100); } return (err); } static struct ieee80211vap * urtwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct urtwn_softc *sc = ic->ic_softc; struct urtwn_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = malloc(sizeof(struct urtwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_IBSS) urtwn_init_beacon(sc, uvp); /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = urtwn_newstate; vap->iv_update_beacon = urtwn_update_beacon; vap->iv_key_alloc = urtwn_key_alloc; vap->iv_key_set = urtwn_key_set; vap->iv_key_delete = urtwn_key_delete; if (opmode == IEEE80211_M_IBSS) { uvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = urtwn_ibss_recv_mgmt; TASK_INIT(&uvp->tsf_task_adhoc, 0, urtwn_tsf_task_adhoc, vap); } if (URTWN_CHIP_HAS_RATECTL(sc)) ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return (vap); } static void urtwn_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct urtwn_softc *sc = ic->ic_softc; struct urtwn_vap *uvp = URTWN_VAP(vap); if (uvp->bcn_mbuf != NULL) m_freem(uvp->bcn_mbuf); if (vap->iv_opmode == IEEE80211_M_IBSS) ieee80211_draintask(ic, &uvp->tsf_task_adhoc); if (URTWN_CHIP_HAS_RATECTL(sc)) ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static struct mbuf * urtwn_rx_copy_to_mbuf(struct urtwn_softc *sc, struct r92c_rx_stat *stat, int totlen) { struct ieee80211com *ic = &sc->sc_ic; struct mbuf *m; uint32_t rxdw0; int pktlen; /* * don't pass packets to the ieee80211 framework if the driver isn't * RUNNING. 
*/ if (!(sc->sc_flags & URTWN_RUNNING)) return (NULL); rxdw0 = le32toh(stat->rxdw0); if (rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR)) { /* * This should not happen since we setup our Rx filter * to not receive these frames. */ URTWN_DPRINTF(sc, URTWN_DEBUG_RECV, "%s: RX flags error (%s)\n", __func__, rxdw0 & R92C_RXDW0_CRCERR ? "CRC" : "ICV"); goto fail; } pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); if (pktlen < sizeof(struct ieee80211_frame_ack)) { URTWN_DPRINTF(sc, URTWN_DEBUG_RECV, "%s: frame is too short: %d\n", __func__, pktlen); goto fail; } if (__predict_false(totlen > MCLBYTES)) { /* convert to m_getjcl if this happens */ device_printf(sc->sc_dev, "%s: frame too long: %d (%d)\n", __func__, pktlen, totlen); goto fail; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m == NULL)) { device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n", __func__); goto fail; } /* Finalize mbuf. */ memcpy(mtod(m, uint8_t *), (uint8_t *)stat, totlen); m->m_pkthdr.len = m->m_len = totlen; return (m); fail: counter_u64_add(ic->ic_ierrors, 1); return (NULL); } static struct mbuf * urtwn_report_intr(struct usb_xfer *xfer, struct urtwn_data *data) { struct urtwn_softc *sc = data->sc; struct ieee80211com *ic = &sc->sc_ic; struct r92c_rx_stat *stat; uint8_t *buf; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); if (len < sizeof(*stat)) { counter_u64_add(ic->ic_ierrors, 1); return (NULL); } buf = data->buf; stat = (struct r92c_rx_stat *)buf; if (sc->chip & URTWN_CHIP_88E) { int report_sel = MS(le32toh(stat->rxdw3), R88E_RXDW3_RPT); switch (report_sel) { case R88E_RXDW3_RPT_RX: return (urtwn_rxeof(sc, buf, len)); case R88E_RXDW3_RPT_TX1: urtwn_r88e_ratectl_tx_complete(sc, &stat[1]); break; default: URTWN_DPRINTF(sc, URTWN_DEBUG_INTR, "%s: case %d was not handled\n", __func__, report_sel); break; } } else return (urtwn_rxeof(sc, buf, len)); return (NULL); } static struct mbuf * urtwn_rxeof(struct urtwn_softc *sc, uint8_t *buf, int len) { struct r92c_rx_stat *stat; struct mbuf *m, *m0 = NULL, *prevm = NULL; uint32_t rxdw0; int totlen, pktlen, infosz, npkts; /* Get the number of encapsulated frames. */ stat = (struct r92c_rx_stat *)buf; npkts = MS(le32toh(stat->rxdw2), R92C_RXDW2_PKTCNT); URTWN_DPRINTF(sc, URTWN_DEBUG_RECV, "%s: Rx %d frames in one chunk\n", __func__, npkts); /* Process all of them. */ while (npkts-- > 0) { if (len < sizeof(*stat)) break; stat = (struct r92c_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); if (pktlen == 0) break; infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; /* Make sure everything fits in xfer. */ totlen = sizeof(*stat) + infosz + pktlen; if (totlen > len) break; m = urtwn_rx_copy_to_mbuf(sc, stat, totlen); if (m0 == NULL) m0 = m; if (prevm == NULL) prevm = m; else { prevm->m_next = m; prevm = m; } /* Next chunk is 128-byte aligned. */ totlen = (totlen + 127) & ~127; buf += totlen; len -= totlen; } return (m0); } static void urtwn_r88e_ratectl_tx_complete(struct urtwn_softc *sc, void *arg) { struct r88e_tx_rpt_ccx *rpt = arg; struct ieee80211vap *vap; struct ieee80211_node *ni; uint8_t macid; int ntries; macid = MS(rpt->rptb1, R88E_RPTB1_MACID); ntries = MS(rpt->rptb2, R88E_RPTB2_RETRY_CNT); URTWN_NT_LOCK(sc); ni = sc->node_list[macid]; if (ni != NULL) { vap = ni->ni_vap; URTWN_DPRINTF(sc, URTWN_DEBUG_INTR, "%s: frame for macid %d was" "%s sent (%d retries)\n", __func__, macid, (rpt->rptb1 & R88E_RPTB1_PKT_OK) ? 
"" : " not", ntries); if (rpt->rptb1 & R88E_RPTB1_PKT_OK) { ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &ntries, NULL); } else { ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &ntries, NULL); } } else { URTWN_DPRINTF(sc, URTWN_DEBUG_INTR, "%s: macid %d, ni is NULL\n", __func__, macid); } URTWN_NT_UNLOCK(sc); } static struct ieee80211_node * urtwn_rx_frame(struct urtwn_softc *sc, struct mbuf *m, int8_t *rssi_p) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame_min *wh; struct r92c_rx_stat *stat; uint32_t rxdw0, rxdw3; uint8_t rate, cipher; int8_t rssi = URTWN_NOISE_FLOOR + 1; int infosz; stat = mtod(m, struct r92c_rx_stat *); rxdw0 = le32toh(stat->rxdw0); rxdw3 = le32toh(stat->rxdw3); rate = MS(rxdw3, R92C_RXDW3_RATE); cipher = MS(rxdw0, R92C_RXDW0_CIPHER); infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; /* Get RSSI from PHY status descriptor if present. */ if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) { if (sc->chip & URTWN_CHIP_88E) rssi = urtwn_r88e_get_rssi(sc, rate, &stat[1]); else rssi = urtwn_get_rssi(sc, rate, &stat[1]); /* Update our average RSSI. */ urtwn_update_avgrssi(sc, rate, rssi); } if (ieee80211_radiotap_active(ic)) { struct urtwn_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; urtwn_get_tsf(sc, &tap->wr_tsft); if (__predict_false(le32toh((uint32_t)tap->wr_tsft) < le32toh(stat->rxdw5))) { tap->wr_tsft = le32toh(tap->wr_tsft >> 32) - 1; tap->wr_tsft = (uint64_t)htole32(tap->wr_tsft) << 32; } else tap->wr_tsft &= 0xffffffff00000000; tap->wr_tsft += stat->rxdw5; /* Map HW rate index to 802.11 rate. */ if (!(rxdw3 & R92C_RXDW3_HT)) { tap->wr_rate = ridx2rate[rate]; } else if (rate >= 12) { /* MCS0~15. */ /* Bit 7 set means HT MCS instead of rate. */ tap->wr_rate = 0x80 | (rate - 12); } tap->wr_dbm_antsignal = rssi; tap->wr_dbm_antnoise = URTWN_NOISE_FLOOR; } *rssi_p = rssi; /* Drop descriptor. */ m_adj(m, sizeof(*stat) + infosz); wh = mtod(m, struct ieee80211_frame_min *); if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && cipher != R92C_CAM_ALGO_NONE) { m->m_flags |= M_WEP; } if (m->m_len >= sizeof(*wh)) return (ieee80211_find_rxnode(ic, wh)); return (NULL); } static void urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtwn_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct mbuf *m = NULL, *next; struct urtwn_data *data; int8_t nf, rssi; URTWN_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); m = urtwn_report_intr(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) { KASSERT(m == NULL, ("mbuf isn't NULL")); return; } STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. 
*/ while (m != NULL) { next = m->m_next; m->m_next = NULL; ni = urtwn_rx_frame(sc, m, &rssi); URTWN_UNLOCK(sc); nf = URTWN_NOISE_FLOOR; if (ni != NULL) { (void)ieee80211_input(ni, m, rssi - nf, nf); ieee80211_free_node(ni); } else { (void)ieee80211_input_all(ic, m, rssi - nf, nf); } URTWN_LOCK(sc); m = next; } break; default: /* needs it to the inactive queue due to a error. */ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } break; } } static void urtwn_txeof(struct urtwn_softc *sc, struct urtwn_data *data, int status) { URTWN_ASSERT_LOCKED(sc); if (data->ni != NULL) /* not a beacon frame */ ieee80211_tx_complete(data->ni, data->m, status); data->ni = NULL; data->m = NULL; sc->sc_txtimer = 0; STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); } static int urtwn_alloc_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct urtwn_data *dp = &data[i]; dp->sc = sc; dp->m = NULL; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT); if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } dp->ni = NULL; } return (0); fail: urtwn_free_list(sc, data, ndata); return (error); } static int urtwn_alloc_rx_list(struct urtwn_softc *sc) { int error, i; error = urtwn_alloc_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT, URTWN_RXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < URTWN_RX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int urtwn_alloc_tx_list(struct urtwn_softc *sc) { int error, i; error = urtwn_alloc_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT, URTWN_TXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < URTWN_TX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); return (0); } static void urtwn_free_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata) { int i; for (i = 0; i < ndata; i++) { struct urtwn_data *dp = &data[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static void urtwn_free_rx_list(struct urtwn_softc *sc) { urtwn_free_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT); } static void urtwn_free_tx_list(struct urtwn_softc *sc) { urtwn_free_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT); } static void urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtwn_softc *sc = usbd_xfer_softc(xfer); struct urtwn_data *data; URTWN_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)){ case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); urtwn_txeof(sc, data, 0); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: empty pending queue\n", __func__); goto finish; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) 
goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); urtwn_txeof(sc, data, 1); if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } finish: /* Kick-start more transmit */ urtwn_start(sc); } static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *sc) { struct urtwn_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else { URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: out of xmit buffers\n", __func__); } return (bf); } static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *sc) { struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); bf = _urtwn_getbuf(sc); if (bf == NULL) { URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: stop queue\n", __func__); } return (bf); } static usb_error_t urtwn_write_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = R92C_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (urtwn_do_request(sc, &req, buf)); } static usb_error_t urtwn_write_1(struct urtwn_softc *sc, uint16_t addr, uint8_t val) { return (urtwn_write_region_1(sc, addr, &val, sizeof(val))); } static usb_error_t urtwn_write_2(struct urtwn_softc *sc, uint16_t addr, uint16_t val) { val = htole16(val); return (urtwn_write_region_1(sc, addr, (uint8_t *)&val, sizeof(val))); } static usb_error_t urtwn_write_4(struct urtwn_softc *sc, uint16_t addr, uint32_t val) { val = htole32(val); return (urtwn_write_region_1(sc, addr, (uint8_t *)&val, sizeof(val))); } static usb_error_t urtwn_read_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = R92C_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (urtwn_do_request(sc, &req, buf)); } static uint8_t urtwn_read_1(struct urtwn_softc *sc, uint16_t addr) { uint8_t val; if (urtwn_read_region_1(sc, addr, &val, 1) != 0) return (0xff); return (val); } static uint16_t urtwn_read_2(struct urtwn_softc *sc, uint16_t addr) { uint16_t val; if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0) return (0xffff); return (le16toh(val)); } static uint32_t urtwn_read_4(struct urtwn_softc *sc, uint16_t addr) { uint32_t val; if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0) return (0xffffffff); return (le32toh(val)); } static int urtwn_fw_cmd(struct urtwn_softc *sc, uint8_t id, const void *buf, int len) { struct r92c_fw_cmd cmd; usb_error_t error; int ntries; if (!(sc->sc_flags & URTWN_FW_LOADED)) { URTWN_DPRINTF(sc, URTWN_DEBUG_FIRMWARE, "%s: firmware " "was not loaded; command (id %d) will be discarded\n", __func__, id); return (0); } /* Wait for current FW box to be empty. */ for (ntries = 0; ntries < 100; ntries++) { if (!(urtwn_read_1(sc, R92C_HMETFR) & (1 << sc->fwcur))) break; urtwn_ms_delay(sc); } if (ntries == 100) { device_printf(sc->sc_dev, "could not send firmware command\n"); return (ETIMEDOUT); } memset(&cmd, 0, sizeof(cmd)); cmd.id = id; if (len > 3) cmd.id |= R92C_CMD_FLAG_EXT; KASSERT(len <= sizeof(cmd.msg), ("urtwn_fw_cmd\n")); memcpy(cmd.msg, buf, len); /* Write the first word last since that will trigger the FW. 
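 * For illustration (a sketch of the register traffic, not additional driver
 * code), a command whose payload is longer than 3 bytes is split like this:
 *
 *    cmd.id |= R92C_CMD_FLAG_EXT;                          set above for len > 3
 *    bytes 4..5 of &cmd  ->  R92C_HMEBOX_EXT(sc->fwcur)    written first
 *    bytes 0..3 of &cmd  ->  R92C_HMEBOX(sc->fwcur)        written last
 *
 * The 4-byte write to the primary box is what hands the command to the
 * firmware, which is why the extension box must already hold its half of the
 * message; sc->fwcur then advances modulo R92C_H2C_NBOX.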
*/ error = urtwn_write_region_1(sc, R92C_HMEBOX_EXT(sc->fwcur), (uint8_t *)&cmd + 4, 2); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_region_1(sc, R92C_HMEBOX(sc->fwcur), (uint8_t *)&cmd + 0, 4); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); sc->fwcur = (sc->fwcur + 1) % R92C_H2C_NBOX; return (0); } static void urtwn_cmdq_cb(void *arg, int pending) { struct urtwn_softc *sc = arg; struct urtwn_cmdq *item; /* * Device must be powered on (via urtwn_power_on()) * before any command may be sent. */ URTWN_LOCK(sc); if (!(sc->sc_flags & URTWN_RUNNING)) { URTWN_UNLOCK(sc); return; } URTWN_CMDQ_LOCK(sc); while (sc->cmdq[sc->cmdq_first].func != NULL) { item = &sc->cmdq[sc->cmdq_first]; sc->cmdq_first = (sc->cmdq_first + 1) % URTWN_CMDQ_SIZE; URTWN_CMDQ_UNLOCK(sc); item->func(sc, &item->data); URTWN_CMDQ_LOCK(sc); memset(item, 0, sizeof (*item)); } URTWN_CMDQ_UNLOCK(sc); URTWN_UNLOCK(sc); } static int urtwn_cmd_sleepable(struct urtwn_softc *sc, const void *ptr, size_t len, CMD_FUNC_PROTO) { struct ieee80211com *ic = &sc->sc_ic; KASSERT(len <= sizeof(union sec_param), ("buffer overflow")); URTWN_CMDQ_LOCK(sc); if (sc->cmdq[sc->cmdq_last].func != NULL) { device_printf(sc->sc_dev, "%s: cmdq overflow\n", __func__); URTWN_CMDQ_UNLOCK(sc); return (EAGAIN); } if (ptr != NULL) memcpy(&sc->cmdq[sc->cmdq_last].data, ptr, len); sc->cmdq[sc->cmdq_last].func = func; sc->cmdq_last = (sc->cmdq_last + 1) % URTWN_CMDQ_SIZE; URTWN_CMDQ_UNLOCK(sc); ieee80211_runtask(ic, &sc->cmdq_task); return (0); } static __inline void urtwn_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { sc->sc_rf_write(sc, chain, addr, val); } static void urtwn_r92c_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { urtwn_bb_write(sc, R92C_LSSI_PARAM(chain), SM(R92C_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val)); } static void urtwn_r88e_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { urtwn_bb_write(sc, R92C_LSSI_PARAM(chain), SM(R88E_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val)); } static uint32_t urtwn_rf_read(struct urtwn_softc *sc, int chain, uint8_t addr) { uint32_t reg[R92C_MAX_CHAINS], val; reg[0] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)); if (chain != 0) reg[chain] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(chain)); urtwn_bb_write(sc, R92C_HSSI_PARAM2(0), reg[0] & ~R92C_HSSI_PARAM2_READ_EDGE); urtwn_ms_delay(sc); urtwn_bb_write(sc, R92C_HSSI_PARAM2(chain), RW(reg[chain], R92C_HSSI_PARAM2_READ_ADDR, addr) | R92C_HSSI_PARAM2_READ_EDGE); urtwn_ms_delay(sc); urtwn_bb_write(sc, R92C_HSSI_PARAM2(0), reg[0] | R92C_HSSI_PARAM2_READ_EDGE); urtwn_ms_delay(sc); if (urtwn_bb_read(sc, R92C_HSSI_PARAM1(chain)) & R92C_HSSI_PARAM1_PI) val = urtwn_bb_read(sc, R92C_HSPI_READBACK(chain)); else val = urtwn_bb_read(sc, R92C_LSSI_READBACK(chain)); return (MS(val, R92C_LSSI_READBACK_DATA)); } static int urtwn_llt_write(struct urtwn_softc *sc, uint32_t addr, uint32_t data) { usb_error_t error; int ntries; error = urtwn_write_4(sc, R92C_LLT_INIT, SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | SM(R92C_LLT_INIT_ADDR, addr) | SM(R92C_LLT_INIT_DATA, data)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Wait for write operation to complete. 
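 * For example, chaining LLT page 5 to page 6 is the single 32-bit write above
 * with OP = R92C_LLT_INIT_OP_WRITE, ADDR = 5 and DATA = 6; the loop below then
 * polls, for at most 20 iterations of urtwn_ms_delay(), until the OP field
 * reads back as R92C_LLT_INIT_OP_NO_ACTIVE.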
*/ for (ntries = 0; ntries < 20; ntries++) { if (MS(urtwn_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == R92C_LLT_INIT_OP_NO_ACTIVE) return (0); urtwn_ms_delay(sc); } return (ETIMEDOUT); } static int urtwn_efuse_read_next(struct urtwn_softc *sc, uint8_t *val) { uint32_t reg; usb_error_t error; int ntries; if (sc->last_rom_addr >= URTWN_EFUSE_MAX_LEN) return (EFAULT); reg = urtwn_read_4(sc, R92C_EFUSE_CTRL); reg = RW(reg, R92C_EFUSE_CTRL_ADDR, sc->last_rom_addr); reg &= ~R92C_EFUSE_CTRL_VALID; error = urtwn_write_4(sc, R92C_EFUSE_CTRL, reg); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Wait for read operation to complete. */ for (ntries = 0; ntries < 100; ntries++) { reg = urtwn_read_4(sc, R92C_EFUSE_CTRL); if (reg & R92C_EFUSE_CTRL_VALID) break; urtwn_ms_delay(sc); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read efuse byte at address 0x%x\n", sc->last_rom_addr); return (ETIMEDOUT); } *val = MS(reg, R92C_EFUSE_CTRL_DATA); sc->last_rom_addr++; return (0); } static int urtwn_efuse_read_data(struct urtwn_softc *sc, uint8_t *rom, uint8_t off, uint8_t msk) { uint8_t reg; int i, error; for (i = 0; i < 4; i++) { if (msk & (1 << i)) continue; error = urtwn_efuse_read_next(sc, &reg); if (error != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "rom[0x%03X] == 0x%02X\n", off * 8 + i * 2, reg); rom[off * 8 + i * 2 + 0] = reg; error = urtwn_efuse_read_next(sc, &reg); if (error != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "rom[0x%03X] == 0x%02X\n", off * 8 + i * 2 + 1, reg); rom[off * 8 + i * 2 + 1] = reg; } return (0); } #ifdef USB_DEBUG static void urtwn_dump_rom_contents(struct urtwn_softc *sc, uint8_t *rom, uint16_t size) { int i; /* Dump ROM contents. */ device_printf(sc->sc_dev, "%s:", __func__); for (i = 0; i < size; i++) { if (i % 32 == 0) printf("\n%03X: ", i); else if (i % 4 == 0) printf(" "); printf("%02X", rom[i]); } printf("\n"); } #endif static int urtwn_efuse_read(struct urtwn_softc *sc, uint8_t *rom, uint16_t size) { #define URTWN_CHK(res) do { \ if ((error = res) != 0) \ goto end; \ } while(0) uint8_t msk, off, reg; int error; URTWN_CHK(urtwn_efuse_switch_power(sc)); /* Read full ROM image. 
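 * Illustrative decoding of the one-byte headers parsed below (the values are
 * hypothetical): a plain header of 0x23 selects 8-byte block 2 (0x23 >> 4)
 * with word mask 0x3, so words 0 and 1 of that block are absent and only
 * words 2 and 3 are stored by urtwn_efuse_read_data().  On the RTL8188E a
 * header whose low five bits are 0x0f instead announces a two-byte extended
 * header carrying a larger block offset.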
*/ sc->last_rom_addr = 0; memset(rom, 0xff, size); URTWN_CHK(urtwn_efuse_read_next(sc, &reg)); while (reg != 0xff) { /* check for extended header */ if ((sc->chip & URTWN_CHIP_88E) && (reg & 0x1f) == 0x0f) { off = reg >> 5; URTWN_CHK(urtwn_efuse_read_next(sc, &reg)); if ((reg & 0x0f) != 0x0f) off = ((reg & 0xf0) >> 1) | off; else continue; } else off = reg >> 4; msk = reg & 0xf; URTWN_CHK(urtwn_efuse_read_data(sc, rom, off, msk)); URTWN_CHK(urtwn_efuse_read_next(sc, &reg)); } end: #ifdef USB_DEBUG if (sc->sc_debug & URTWN_DEBUG_ROM) urtwn_dump_rom_contents(sc, rom, size); #endif urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_OFF); if (error != 0) { device_printf(sc->sc_dev, "%s: error while reading ROM\n", __func__); } return (error); #undef URTWN_CHK } static int urtwn_efuse_switch_power(struct urtwn_softc *sc) { usb_error_t error; uint32_t reg; error = urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_ON); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); reg = urtwn_read_2(sc, R92C_SYS_ISO_CTRL); if (!(reg & R92C_SYS_ISO_CTRL_PWC_EV12V)) { error = urtwn_write_2(sc, R92C_SYS_ISO_CTRL, reg | R92C_SYS_ISO_CTRL_PWC_EV12V); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); if (!(reg & R92C_SYS_FUNC_EN_ELDR)) { error = urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg | R92C_SYS_FUNC_EN_ELDR); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } reg = urtwn_read_2(sc, R92C_SYS_CLKR); if ((reg & (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) != (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) { error = urtwn_write_2(sc, R92C_SYS_CLKR, reg | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } return (0); } static int urtwn_read_chipid(struct urtwn_softc *sc) { uint32_t reg; if (sc->chip & URTWN_CHIP_88E) return (0); reg = urtwn_read_4(sc, R92C_SYS_CFG); if (reg & R92C_SYS_CFG_TRP_VAUX_EN) return (EIO); if (reg & R92C_SYS_CFG_TYPE_92C) { sc->chip |= URTWN_CHIP_92C; /* Check if it is a castrated 8192C. */ if (MS(urtwn_read_4(sc, R92C_HPON_FSM), R92C_HPON_FSM_CHIP_BONDING_ID) == R92C_HPON_FSM_CHIP_BONDING_ID_92C_1T2R) sc->chip |= URTWN_CHIP_92C_1T2R; } if (reg & R92C_SYS_CFG_VENDOR_UMC) { sc->chip |= URTWN_CHIP_UMC; if (MS(reg, R92C_SYS_CFG_CHIP_VER_RTL) == 0) sc->chip |= URTWN_CHIP_UMC_A_CUT; } return (0); } static int urtwn_read_rom(struct urtwn_softc *sc) { struct r92c_rom *rom = &sc->rom.r92c_rom; int error; /* Read full ROM image. */ error = urtwn_efuse_read(sc, (uint8_t *)rom, sizeof(*rom)); if (error != 0) return (error); /* XXX Weird but this is what the vendor driver does. 
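 * (Concretely: last_rom_addr is rewound to raw efuse address 0x1fa and one
 * extra byte, the PA setting, is pulled with a direct urtwn_efuse_read_next()
 * instead of going through the struct r92c_rom layout read above.)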
*/ sc->last_rom_addr = 0x1fa; error = urtwn_efuse_read_next(sc, &sc->pa_setting); if (error != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "%s: PA setting=0x%x\n", __func__, sc->pa_setting); sc->board_type = MS(rom->rf_opt1, R92C_ROM_RF1_BOARD_TYPE); sc->regulatory = MS(rom->rf_opt1, R92C_ROM_RF1_REGULATORY); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "%s: regulatory type=%d\n", __func__, sc->regulatory); IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, rom->macaddr); sc->sc_rf_write = urtwn_r92c_rf_write; sc->sc_power_on = urtwn_r92c_power_on; sc->sc_power_off = urtwn_r92c_power_off; return (0); } static int urtwn_r88e_read_rom(struct urtwn_softc *sc) { struct r88e_rom *rom = &sc->rom.r88e_rom; int error; error = urtwn_efuse_read(sc, (uint8_t *)rom, sizeof(sc->rom.r88e_rom)); if (error != 0) return (error); sc->bw20_tx_pwr_diff = (rom->tx_pwr_diff >> 4); if (sc->bw20_tx_pwr_diff & 0x08) sc->bw20_tx_pwr_diff |= 0xf0; sc->ofdm_tx_pwr_diff = (rom->tx_pwr_diff & 0xf); if (sc->ofdm_tx_pwr_diff & 0x08) sc->ofdm_tx_pwr_diff |= 0xf0; sc->regulatory = MS(rom->rf_board_opt, R92C_ROM_RF1_REGULATORY); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "%s: regulatory type %d\n", __func__,sc->regulatory); IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, rom->macaddr); sc->sc_rf_write = urtwn_r88e_rf_write; sc->sc_power_on = urtwn_r88e_power_on; sc->sc_power_off = urtwn_r88e_power_off; return (0); } /* * Initialize rate adaptation in firmware. */ static int urtwn_ra_init(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct ieee80211_rateset *rs; struct r92c_fw_cmd_macid_cfg cmd; uint32_t rates, basicrates; uint8_t mode; int maxrate, maxbasicrate, error, i, j; ni = ieee80211_ref_node(vap->iv_bss); rs = &ni->ni_rates; /* Get normal and basic rates mask. */ rates = basicrates = 0; maxrate = maxbasicrate = 0; for (i = 0; i < rs->rs_nrates; i++) { /* Convert 802.11 rate to HW rate index. */ for (j = 0; j < nitems(ridx2rate); j++) if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == ridx2rate[j]) break; if (j == nitems(ridx2rate)) /* Unknown rate, skip. */ continue; rates |= 1 << j; if (j > maxrate) maxrate = j; if (rs->rs_rates[i] & IEEE80211_RATE_BASIC) { basicrates |= 1 << j; if (j > maxbasicrate) maxbasicrate = j; } } if (ic->ic_curmode == IEEE80211_MODE_11B) mode = R92C_RAID_11B; else mode = R92C_RAID_11BG; URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: mode 0x%x, rates 0x%08x, basicrates 0x%08x\n", __func__, mode, rates, basicrates); /* Set rates mask for group addressed frames. */ cmd.macid = URTWN_MACID_BC | URTWN_MACID_VALID; cmd.mask = htole32(mode << 28 | basicrates); error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd)); if (error != 0) { ieee80211_free_node(ni); device_printf(sc->sc_dev, "could not add broadcast station\n"); return (error); } /* Set initial MRR rate. */ URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: maxbasicrate %d\n", __func__, maxbasicrate); urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BC), maxbasicrate); /* Set rates mask for unicast frames. */ cmd.macid = URTWN_MACID_BSS | URTWN_MACID_VALID; cmd.mask = htole32(mode << 28 | rates); error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd)); if (error != 0) { ieee80211_free_node(ni); device_printf(sc->sc_dev, "could not add BSS station\n"); return (error); } /* Set initial MRR rate. 
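 * Worked example (hypothetical 11bg BSS): the CCK basic rates 1/2/5.5/11
 * Mbit/s map to ridx 0-3, so basicrates is 0x0000000f and the broadcast entry
 * carries mask = (R92C_RAID_11BG << 28) | 0x0000000f, while the unicast entry
 * ORs the full negotiated set into the low 28 bits; R92C_INIDATA_RATE_SEL is
 * then seeded with the highest index in each set (maxbasicrate above,
 * maxrate below).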
*/ URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: maxrate %d\n", __func__, maxrate); urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BSS), maxrate); /* Indicate highest supported rate. */ ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1]; ieee80211_free_node(ni); return (0); } static void urtwn_init_beacon(struct urtwn_softc *sc, struct urtwn_vap *uvp) { struct r92c_tx_desc *txd = &uvp->bcn_desc; txd->txdw0 = htole32( SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_BMCAST | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); txd->txdw1 = htole32( SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BEACON) | SM(R92C_TXDW1_RAID, R92C_RAID_11B)); if (sc->chip & URTWN_CHIP_88E) { txd->txdw1 |= htole32(SM(R88E_TXDW1_MACID, URTWN_MACID_BC)); txd->txdseq |= htole16(R88E_TXDSEQ_HWSEQ_EN); } else { txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, URTWN_MACID_BC)); txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ_EN); } txd->txdw4 = htole32(R92C_TXDW4_DRVRATE); txd->txdw5 = htole32(SM(R92C_TXDW5_DATARATE, URTWN_RIDX_CCK1)); } static int urtwn_setup_beacon(struct urtwn_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct urtwn_vap *uvp = URTWN_VAP(vap); struct mbuf *m; int error; URTWN_ASSERT_LOCKED(sc); if (ni->ni_chan == IEEE80211_CHAN_ANYC) return (EINVAL); m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); return (ENOMEM); } if (uvp->bcn_mbuf != NULL) m_freem(uvp->bcn_mbuf); uvp->bcn_mbuf = m; if ((error = urtwn_tx_beacon(sc, uvp)) != 0) return (error); /* XXX bcnq stuck workaround */ if ((error = urtwn_tx_beacon(sc, uvp)) != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_BEACON, "%s: beacon was %srecognized\n", __func__, urtwn_read_1(sc, R92C_TDECTRL + 2) & (R92C_TDECTRL_BCN_VALID >> 16) ? "" : "not "); return (0); } static void urtwn_update_beacon(struct ieee80211vap *vap, int item) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; struct urtwn_vap *uvp = URTWN_VAP(vap); struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; int mcast = 0; URTWN_LOCK(sc); if (uvp->bcn_mbuf == NULL) { uvp->bcn_mbuf = ieee80211_beacon_alloc(ni); if (uvp->bcn_mbuf == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); URTWN_UNLOCK(sc); return; } } URTWN_UNLOCK(sc); if (item == IEEE80211_BEACON_TIM) mcast = 1; /* XXX */ setbit(bo->bo_flags, item); ieee80211_beacon_update(ni, uvp->bcn_mbuf, mcast); URTWN_LOCK(sc); urtwn_tx_beacon(sc, uvp); URTWN_UNLOCK(sc); } /* * Push a beacon frame into the chip. Beacon will * be repeated by the chip every R92C_BCN_INTERVAL. */ static int urtwn_tx_beacon(struct urtwn_softc *sc, struct urtwn_vap *uvp) { struct r92c_tx_desc *desc = &uvp->bcn_desc; struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); bf = urtwn_getbuf(sc); if (bf == NULL) return (ENOMEM); memcpy(bf->buf, desc, sizeof(*desc)); urtwn_tx_start(sc, uvp->bcn_mbuf, IEEE80211_FC0_TYPE_MGT, bf); sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); return (0); } static int urtwn_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; uint8_t i; if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { URTWN_LOCK(sc); /* * First 4 slots for group keys, * what is left - for pairwise. * XXX incompatible with IBSS RSN. 
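 * (keys_bmap therefore only tracks the dynamically assigned CAM entries:
 * group keys keep their net80211 indices 0..IEEE80211_WEP_NKID-1, and the
 * scan below hands out the first clear bit at or above IEEE80211_WEP_NKID,
 * so the first pairwise key typically ends up in CAM entry 4.)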
*/ for (i = IEEE80211_WEP_NKID; i < R92C_CAM_ENTRY_COUNT; i++) { if ((sc->keys_bmap & (1 << i)) == 0) { sc->keys_bmap |= 1 << i; *keyix = i; break; } } URTWN_UNLOCK(sc); if (i == R92C_CAM_ENTRY_COUNT) { device_printf(sc->sc_dev, "%s: no free space in the key table\n", __func__); return 0; } } else *keyix = 0; } else { *keyix = k - vap->iv_nw_keys; } *rxkeyix = *keyix; return 1; } static void urtwn_key_set_cb(struct urtwn_softc *sc, union sec_param *data) { struct ieee80211_key *k = &data->key; uint8_t algo, keyid; int i, error; if (k->wk_keyix < IEEE80211_WEP_NKID) keyid = k->wk_keyix; else keyid = 0; /* Map net80211 cipher to HW crypto algorithm. */ switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: if (k->wk_keylen < 8) algo = R92C_CAM_ALGO_WEP40; else algo = R92C_CAM_ALGO_WEP104; break; case IEEE80211_CIPHER_TKIP: algo = R92C_CAM_ALGO_TKIP; break; case IEEE80211_CIPHER_AES_CCM: algo = R92C_CAM_ALGO_AES; break; default: device_printf(sc->sc_dev, "%s: undefined cipher %d\n", __func__, k->wk_cipher->ic_cipher); return; } URTWN_DPRINTF(sc, URTWN_DEBUG_KEY, "%s: keyix %d, keyid %d, algo %d/%d, flags %04X, len %d, " "macaddr %s\n", __func__, k->wk_keyix, keyid, k->wk_cipher->ic_cipher, algo, k->wk_flags, k->wk_keylen, ether_sprintf(k->wk_macaddr)); /* Write key. */ for (i = 0; i < 4; i++) { error = urtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i), LE_READ_4(&k->wk_key[i * 4])); if (error != 0) goto fail; } /* Write CTL0 last since that will validate the CAM entry. */ error = urtwn_cam_write(sc, R92C_CAM_CTL1(k->wk_keyix), LE_READ_4(&k->wk_macaddr[2])); if (error != 0) goto fail; error = urtwn_cam_write(sc, R92C_CAM_CTL0(k->wk_keyix), SM(R92C_CAM_ALGO, algo) | SM(R92C_CAM_KEYID, keyid) | SM(R92C_CAM_MACLO, LE_READ_2(&k->wk_macaddr[0])) | R92C_CAM_VALID); if (error != 0) goto fail; return; fail: device_printf(sc->sc_dev, "%s fails, error %d\n", __func__, error); } static void urtwn_key_del_cb(struct urtwn_softc *sc, union sec_param *data) { struct ieee80211_key *k = &data->key; int i; URTWN_DPRINTF(sc, URTWN_DEBUG_KEY, "%s: keyix %d, flags %04X, macaddr %s\n", __func__, k->wk_keyix, k->wk_flags, ether_sprintf(k->wk_macaddr)); urtwn_cam_write(sc, R92C_CAM_CTL0(k->wk_keyix), 0); urtwn_cam_write(sc, R92C_CAM_CTL1(k->wk_keyix), 0); /* Clear key. */ for (i = 0; i < 4; i++) urtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i), 0); sc->keys_bmap &= ~(1 << k->wk_keyix); } static int urtwn_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return (1); } return (!urtwn_cmd_sleepable(sc, k, sizeof(*k), urtwn_key_set_cb)); } static int urtwn_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return (1); } return (!urtwn_cmd_sleepable(sc, k, sizeof(*k), urtwn_key_del_cb)); } static void urtwn_tsf_task_adhoc(void *arg, int pending) { struct ieee80211vap *vap = arg; struct urtwn_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_node *ni; uint32_t reg; URTWN_LOCK(sc); ni = ieee80211_ref_node(vap->iv_bss); reg = urtwn_read_1(sc, R92C_BCN_CTRL); /* Accept beacons with the same BSSID. */ urtwn_set_rx_bssid_all(sc, 0); /* Enable synchronization. */ reg &= ~R92C_BCN_CTRL_DIS_TSF_UDT0; urtwn_write_1(sc, R92C_BCN_CTRL, reg); /* Synchronize. */ usb_pause_mtx(&sc->sc_mtx, hz * ni->ni_intval * 5 / 1000); /* Disable synchronization. 
*/ reg |= R92C_BCN_CTRL_DIS_TSF_UDT0; urtwn_write_1(sc, R92C_BCN_CTRL, reg); /* Remove beacon filter. */ urtwn_set_rx_bssid_all(sc, 1); /* Enable beaconing. */ urtwn_write_1(sc, R92C_MBID_NUM, urtwn_read_1(sc, R92C_MBID_NUM) | R92C_MBID_TXBCN_RPT0); reg |= R92C_BCN_CTRL_EN_BCN; urtwn_write_1(sc, R92C_BCN_CTRL, reg); ieee80211_free_node(ni); URTWN_UNLOCK(sc); } static void urtwn_tsf_sync_enable(struct urtwn_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = &sc->sc_ic; struct urtwn_vap *uvp = URTWN_VAP(vap); /* Reset TSF. */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, R92C_DUAL_TSF_RST0); switch (vap->iv_opmode) { case IEEE80211_M_STA: /* Enable TSF synchronization. */ urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_DIS_TSF_UDT0); break; case IEEE80211_M_IBSS: ieee80211_runtask(ic, &uvp->tsf_task_adhoc); break; case IEEE80211_M_HOSTAP: /* Enable beaconing. */ urtwn_write_1(sc, R92C_MBID_NUM, urtwn_read_1(sc, R92C_MBID_NUM) | R92C_MBID_TXBCN_RPT0); urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) | R92C_BCN_CTRL_EN_BCN); break; default: device_printf(sc->sc_dev, "undefined opmode %d\n", vap->iv_opmode); return; } } static void urtwn_get_tsf(struct urtwn_softc *sc, uint64_t *buf) { urtwn_read_region_1(sc, R92C_TSFTR, (uint8_t *)buf, sizeof(*buf)); } static void urtwn_set_led(struct urtwn_softc *sc, int led, int on) { uint8_t reg; if (led == URTWN_LED_LINK) { if (sc->chip & URTWN_CHIP_88E) { reg = urtwn_read_1(sc, R92C_LEDCFG2) & 0xf0; urtwn_write_1(sc, R92C_LEDCFG2, reg | 0x60); if (!on) { reg = urtwn_read_1(sc, R92C_LEDCFG2) & 0x90; urtwn_write_1(sc, R92C_LEDCFG2, reg | R92C_LEDCFG0_DIS); urtwn_write_1(sc, R92C_MAC_PINMUX_CFG, urtwn_read_1(sc, R92C_MAC_PINMUX_CFG) & 0xfe); } } else { reg = urtwn_read_1(sc, R92C_LEDCFG0) & 0x70; if (!on) reg |= R92C_LEDCFG0_DIS; urtwn_write_1(sc, R92C_LEDCFG0, reg); } sc->ledlink = on; /* Save LED state. */ } } static void urtwn_set_mode(struct urtwn_softc *sc, uint8_t mode) { uint8_t reg; reg = urtwn_read_1(sc, R92C_MSR); reg = (reg & ~R92C_MSR_MASK) | mode; urtwn_write_1(sc, R92C_MSR, reg); } static void urtwn_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct urtwn_softc *sc = vap->iv_ic->ic_softc; struct urtwn_vap *uvp = URTWN_VAP(vap); uint64_t ni_tstamp, curr_tstamp; uvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); if (vap->iv_state == IEEE80211_S_RUN && (subtype == IEEE80211_FC0_SUBTYPE_BEACON || subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { ni_tstamp = le64toh(ni->ni_tstamp.tsf); URTWN_LOCK(sc); urtwn_get_tsf(sc, &curr_tstamp); URTWN_UNLOCK(sc); curr_tstamp = le64toh(curr_tstamp); if (ni_tstamp >= curr_tstamp) (void) ieee80211_ibss_merge(ni); } } static int urtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct urtwn_vap *uvp = URTWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct urtwn_softc *sc = ic->ic_softc; struct ieee80211_node *ni; enum ieee80211_state ostate; uint32_t reg; uint8_t mode; int error = 0; ostate = vap->iv_state; URTWN_DPRINTF(sc, URTWN_DEBUG_STATE, "%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); URTWN_LOCK(sc); callout_stop(&sc->sc_watchdog_ch); if (ostate == IEEE80211_S_RUN) { /* Stop calibration. */ callout_stop(&sc->sc_calib_to); /* Turn link LED off. */ urtwn_set_led(sc, URTWN_LED_LINK, 0); /* Set media status to 'No Link'. 
*/ urtwn_set_mode(sc, R92C_MSR_NOLINK); /* Stop Rx of data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0); /* Disable TSF synchronization. */ urtwn_write_1(sc, R92C_BCN_CTRL, (urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_EN_BCN) | R92C_BCN_CTRL_DIS_TSF_UDT0); /* Disable beaconing. */ urtwn_write_1(sc, R92C_MBID_NUM, urtwn_read_1(sc, R92C_MBID_NUM) & ~R92C_MBID_TXBCN_RPT0); /* Reset TSF. */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, R92C_DUAL_TSF_RST0); /* Reset EDCA parameters. */ urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002f3217); urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005e4317); urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x00105320); urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a444); } switch (nstate) { case IEEE80211_S_INIT: /* Turn link LED off. */ urtwn_set_led(sc, URTWN_LED_LINK, 0); break; case IEEE80211_S_SCAN: /* Pause AC Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, urtwn_read_1(sc, R92C_TXPAUSE) | R92C_TX_QUEUE_AC); break; case IEEE80211_S_AUTH: urtwn_set_chan(sc, ic->ic_curchan, NULL); break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Turn link LED on. */ urtwn_set_led(sc, URTWN_LED_LINK, 1); break; } ni = ieee80211_ref_node(vap->iv_bss); if (ic->ic_bsschan == IEEE80211_CHAN_ANYC || ni->ni_chan == IEEE80211_CHAN_ANYC) { device_printf(sc->sc_dev, "%s: could not move to RUN state\n", __func__); error = EINVAL; goto end_run; } switch (vap->iv_opmode) { case IEEE80211_M_STA: mode = R92C_MSR_INFRA; break; case IEEE80211_M_IBSS: mode = R92C_MSR_ADHOC; break; case IEEE80211_M_HOSTAP: mode = R92C_MSR_AP; break; default: device_printf(sc->sc_dev, "undefined opmode %d\n", vap->iv_opmode); error = EINVAL; goto end_run; } /* Set media status to 'Associated'. */ urtwn_set_mode(sc, mode); /* Set BSSID. */ urtwn_write_4(sc, R92C_BSSID + 0, LE_READ_4(&ni->ni_bssid[0])); urtwn_write_4(sc, R92C_BSSID + 4, LE_READ_2(&ni->ni_bssid[4])); if (ic->ic_curmode == IEEE80211_MODE_11B) urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 0); else /* 802.11b/g */ urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 3); /* Enable Rx of data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff); /* Flush all AC queues. */ urtwn_write_1(sc, R92C_TXPAUSE, 0); /* Set beacon interval. */ urtwn_write_2(sc, R92C_BCN_INTERVAL, ni->ni_intval); /* Allow Rx from our BSSID only. */ if (ic->ic_promisc == 0) { reg = urtwn_read_4(sc, R92C_RCR); if (vap->iv_opmode != IEEE80211_M_HOSTAP) reg |= R92C_RCR_CBSSID_DATA; if (vap->iv_opmode != IEEE80211_M_IBSS) reg |= R92C_RCR_CBSSID_BCN; urtwn_write_4(sc, R92C_RCR, reg); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { error = urtwn_setup_beacon(sc, ni); if (error != 0) { device_printf(sc->sc_dev, "unable to push beacon into the chip, " "error %d\n", error); goto end_run; } } /* Enable TSF synchronization. */ urtwn_tsf_sync_enable(sc, vap); urtwn_write_1(sc, R92C_SIFS_CCK + 1, 10); urtwn_write_1(sc, R92C_SIFS_OFDM + 1, 10); urtwn_write_1(sc, R92C_SPEC_SIFS + 1, 10); urtwn_write_1(sc, R92C_MAC_SPEC_SIFS + 1, 10); urtwn_write_1(sc, R92C_R2T_SIFS + 1, 10); urtwn_write_1(sc, R92C_T2T_SIFS + 1, 10); /* Intialize rate adaptation. */ if (!(sc->chip & URTWN_CHIP_88E)) urtwn_ra_init(sc); /* Turn link LED on. */ urtwn_set_led(sc, URTWN_LED_LINK, 1); sc->avg_pwdb = -1; /* Reset average RSSI. */ /* Reset temperature calibration state machine. */ sc->sc_flags &= ~URTWN_TEMP_MEASURED; sc->thcal_lctemp = 0; /* Start periodic calibration. 
*/ callout_reset(&sc->sc_calib_to, 2*hz, urtwn_calib_to, sc); end_run: ieee80211_free_node(ni); break; default: break; } URTWN_UNLOCK(sc); IEEE80211_LOCK(ic); return (error != 0 ? error : uvp->newstate(vap, nstate, arg)); } static void urtwn_calib_to(void *arg) { struct urtwn_softc *sc = arg; /* Do it in a process context. */ urtwn_cmd_sleepable(sc, NULL, 0, urtwn_calib_cb); } static void urtwn_calib_cb(struct urtwn_softc *sc, union sec_param *data) { /* Do temperature compensation. */ urtwn_temp_calib(sc); if ((urtwn_read_1(sc, R92C_MSR) & R92C_MSR_MASK) != R92C_MSR_NOLINK) callout_reset(&sc->sc_calib_to, 2*hz, urtwn_calib_to, sc); } static void urtwn_watchdog(void *arg) { struct urtwn_softc *sc = arg; if (sc->sc_txtimer > 0) { if (--sc->sc_txtimer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); return; } callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); } } static void urtwn_update_avgrssi(struct urtwn_softc *sc, int rate, int8_t rssi) { int pwdb; /* Convert antenna signal to percentage. */ if (rssi <= -100 || rssi >= 20) pwdb = 0; else if (rssi >= 0) pwdb = 100; else pwdb = 100 + rssi; if (!(sc->chip & URTWN_CHIP_88E)) { if (rate <= URTWN_RIDX_CCK11) { /* CCK gain is smaller than OFDM/MCS gain. */ pwdb += 6; if (pwdb > 100) pwdb = 100; if (pwdb <= 14) pwdb -= 4; else if (pwdb <= 26) pwdb -= 8; else if (pwdb <= 34) pwdb -= 6; else if (pwdb <= 42) pwdb -= 2; } } if (sc->avg_pwdb == -1) /* Init. */ sc->avg_pwdb = pwdb; else if (sc->avg_pwdb < pwdb) sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20) + 1; else sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20); URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: PWDB %d, EMA %d\n", __func__, pwdb, sc->avg_pwdb); } static int8_t urtwn_get_rssi(struct urtwn_softc *sc, int rate, void *physt) { static const int8_t cckoff[] = { 16, -12, -26, -46 }; struct r92c_rx_phystat *phy; struct r92c_rx_cck *cck; uint8_t rpt; int8_t rssi; if (rate <= URTWN_RIDX_CCK11) { cck = (struct r92c_rx_cck *)physt; if (sc->sc_flags & URTWN_FLAG_CCK_HIPWR) { rpt = (cck->agc_rpt >> 5) & 0x3; rssi = (cck->agc_rpt & 0x1f) << 1; } else { rpt = (cck->agc_rpt >> 6) & 0x3; rssi = cck->agc_rpt & 0x3e; } rssi = cckoff[rpt] - rssi; } else { /* OFDM/HT. */ phy = (struct r92c_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110; } return (rssi); } static int8_t urtwn_r88e_get_rssi(struct urtwn_softc *sc, int rate, void *physt) { struct r92c_rx_phystat *phy; struct r88e_rx_cck *cck; uint8_t cck_agc_rpt, lna_idx, vga_idx; int8_t rssi; rssi = 0; if (rate <= URTWN_RIDX_CCK11) { cck = (struct r88e_rx_cck *)physt; cck_agc_rpt = cck->agc_rpt; lna_idx = (cck_agc_rpt & 0xe0) >> 5; vga_idx = cck_agc_rpt & 0x1f; switch (lna_idx) { case 7: if (vga_idx <= 27) rssi = -100 + 2* (27 - vga_idx); else rssi = -100; break; case 6: rssi = -48 + 2 * (2 - vga_idx); break; case 5: rssi = -42 + 2 * (7 - vga_idx); break; case 4: rssi = -36 + 2 * (7 - vga_idx); break; case 3: rssi = -24 + 2 * (7 - vga_idx); break; case 2: rssi = -12 + 2 * (5 - vga_idx); break; case 1: rssi = 8 - (2 * vga_idx); break; case 0: rssi = 14 - (2 * vga_idx); break; } rssi += 6; } else { /* OFDM/HT. 
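 * The PHY status carries a 7-bit value in bits 1..7 of phydw1 which is
 * rebased to dBm by subtracting 110; a raw value of 60, for instance, yields
 * -50 dBm.  (The CCK branch above instead reconstructs dBm from the reported
 * LNA and VGA indices, hence the per-LNA base offsets.)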
*/ phy = (struct r92c_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110; } return (rssi); } static __inline uint8_t rate2ridx(uint8_t rate) { switch (rate) { case 12: return 4; case 18: return 5; case 24: return 6; case 36: return 7; case 48: return 8; case 72: return 9; case 96: return 10; case 108: return 11; case 2: return 0; case 4: return 1; case 11: return 2; case 22: return 3; default: return 0; } } static int urtwn_tx_data(struct urtwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m, struct urtwn_data *data) { const struct ieee80211_txparam *tp; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *k = NULL; struct ieee80211_channel *chan; struct ieee80211_frame *wh; struct r92c_tx_desc *txd; uint8_t macid, raid, rate, ridx, subtype, type, tid, qsel; int hasqos, ismcast; URTWN_ASSERT_LOCKED(sc); /* * Software crypto. */ wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; hasqos = IEEE80211_QOS_HAS_SEQ(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); /* Select TX ring for this frame. */ if (hasqos) { tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid &= IEEE80211_QOS_TID; } else tid = 0; chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? ni->ni_chan : ic->ic_curchan; tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; /* Choose a TX rate index. */ if (type == IEEE80211_FC0_TYPE_MGT) rate = tp->mgmtrate; else if (ismcast) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else if (m->m_flags & M_EAPOL) rate = tp->mgmtrate; else { if (URTWN_CHIP_HAS_RATECTL(sc)) { /* XXX pass pktlen */ (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } else { if (ic->ic_curmode != IEEE80211_MODE_11B) rate = 108; else rate = 22; } } ridx = rate2ridx(rate); if (ic->ic_curmode != IEEE80211_MODE_11B) raid = R92C_RAID_11BG; else raid = R92C_RAID_11B; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); return (ENOBUFS); } /* in case packet header moved, reset pointer */ wh = mtod(m, struct ieee80211_frame *); } /* Fill Tx descriptor. 
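 * The descriptor is assembled in place at the head of data->buf and the frame
 * itself is copied right behind it later in urtwn_tx_start().  For example, a
 * data frame sent at OFDM 24 Mbit/s (rate code 48, so rate2ridx() above
 * yields 8) ends up with 8 in the R92C_TXDW5_DATARATE field.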
*/ txd = (struct r92c_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); if (ismcast) txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); if (!ismcast) { if (sc->chip & URTWN_CHIP_88E) { struct urtwn_node *un = URTWN_NODE(ni); macid = un->id; } else macid = URTWN_MACID_BSS; if (type == IEEE80211_FC0_TYPE_DATA) { qsel = tid % URTWN_MAX_TID; if (sc->chip & URTWN_CHIP_88E) { txd->txdw2 |= htole32( R88E_TXDW2_AGGBK | R88E_TXDW2_CCX_RPT); } else txd->txdw1 |= htole32(R92C_TXDW1_AGGBK); if (ic->ic_flags & IEEE80211_F_USEPROT) { switch (ic->ic_protmode) { case IEEE80211_PROT_CTSONLY: txd->txdw4 |= htole32( R92C_TXDW4_CTS2SELF | R92C_TXDW4_HWRTSEN); break; case IEEE80211_PROT_RTSCTS: txd->txdw4 |= htole32( R92C_TXDW4_RTSEN | R92C_TXDW4_HWRTSEN); break; default: break; } } txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, URTWN_RIDX_OFDM24)); txd->txdw5 |= htole32(0x0001ff00); } else /* IEEE80211_FC0_TYPE_MGT */ qsel = R92C_TXDW1_QSEL_MGNT; } else { macid = URTWN_MACID_BC; qsel = R92C_TXDW1_QSEL_MGNT; } txd->txdw1 |= htole32( SM(R92C_TXDW1_QSEL, qsel) | SM(R92C_TXDW1_RAID, raid)); if (sc->chip & URTWN_CHIP_88E) txd->txdw1 |= htole32(SM(R88E_TXDW1_MACID, macid)); else txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, macid)); txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx)); /* Force this rate if needed. */ if (URTWN_CHIP_HAS_RATECTL(sc) || ismcast || (m->m_flags & M_EAPOL) || type != IEEE80211_FC0_TYPE_DATA) txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); if (!hasqos) { /* Use HW sequence numbering for non-QoS frames. */ if (sc->chip & URTWN_CHIP_88E) txd->txdseq = htole16(R88E_TXDSEQ_HWSEQ_EN); else txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ_EN); } else { /* Set sequence number. */ txd->txdseq = htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE); } if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { uint8_t cipher; switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: case IEEE80211_CIPHER_TKIP: cipher = R92C_TXDW1_CIPHER_RC4; break; case IEEE80211_CIPHER_AES_CCM: cipher = R92C_TXDW1_CIPHER_AES; break; default: device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return (EINVAL); } txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); } if (ieee80211_radiotap_active_vap(vap)) { struct urtwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } data->ni = ni; urtwn_tx_start(sc, m, type, data); return (0); } static int urtwn_tx_raw(struct urtwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m, struct urtwn_data *data, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *k = NULL; struct ieee80211_frame *wh; struct r92c_tx_desc *txd; uint8_t cipher, ridx, type; /* Encrypt the frame if need be. */ cipher = R92C_TXDW1_CIPHER_NONE; if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { /* Retrieve key for TX. 
*/ k = ieee80211_crypto_encap(ni, m); if (k == NULL) return (ENOBUFS); if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: case IEEE80211_CIPHER_TKIP: cipher = R92C_TXDW1_CIPHER_RC4; break; case IEEE80211_CIPHER_AES_CCM: cipher = R92C_TXDW1_CIPHER_AES; break; default: device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return (EINVAL); } } } wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* Fill Tx descriptor. */ txd = (struct r92c_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); if (params->ibp_flags & IEEE80211_BPF_RTS) txd->txdw4 |= htole32(R92C_TXDW4_RTSEN); if (params->ibp_flags & IEEE80211_BPF_CTS) txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF); if (txd->txdw4 & htole32(R92C_TXDW4_RTSEN | R92C_TXDW4_CTS2SELF)) { txd->txdw4 |= htole32(R92C_TXDW4_HWRTSEN); txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, URTWN_RIDX_OFDM24)); } if (sc->chip & URTWN_CHIP_88E) txd->txdw1 |= htole32(SM(R88E_TXDW1_MACID, URTWN_MACID_BC)); else txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, URTWN_MACID_BC)); txd->txdw1 |= htole32(SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT)); txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); /* Choose a TX rate index. */ ridx = rate2ridx(params->ibp_rate0); txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx)); txd->txdw5 |= htole32(0x0001ff00); txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); if (!IEEE80211_QOS_HAS_SEQ(wh)) { /* Use HW sequence numbering for non-QoS frames. */ if (sc->chip & URTWN_CHIP_88E) txd->txdseq = htole16(R88E_TXDSEQ_HWSEQ_EN); else txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ_EN); } else { /* Set sequence number. */ txd->txdseq = htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE); } if (ieee80211_radiotap_active_vap(vap)) { struct urtwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } data->ni = ni; urtwn_tx_start(sc, m, type, data); return (0); } static void urtwn_tx_start(struct urtwn_softc *sc, struct mbuf *m, uint8_t type, struct urtwn_data *data) { struct usb_xfer *xfer; struct r92c_tx_desc *txd; uint16_t ac, sum; int i, xferlen; URTWN_ASSERT_LOCKED(sc); ac = M_WME_GETAC(m); switch (type) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: xfer = sc->sc_xfer[URTWN_BULK_TX_VO]; break; default: xfer = sc->sc_xfer[wme2queue[ac].qid]; break; } txd = (struct r92c_tx_desc *)data->buf; txd->txdw0 |= htole32(SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len)); /* Compute Tx descriptor checksum. */ sum = 0; for (i = 0; i < sizeof(*txd) / 2; i++) sum ^= ((uint16_t *)txd)[i]; txd->txdsum = sum; /* NB: already little endian. 
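 * The checksum is just the XOR of the descriptor's 16-bit words; every field
 * was stored with htole16()/htole32(), so the XOR is already in wire byte
 * order and can be assigned to txdsum directly.  (txdsum itself is still zero
 * at this point, so including it in the XOR does not change the result.)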
*/ xferlen = sizeof(*txd) + m->m_pkthdr.len; m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)&txd[1]); data->buflen = xferlen; data->m = m; STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); usbd_transfer_start(xfer); } static int urtwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct urtwn_softc *sc = ic->ic_softc; int error; URTWN_LOCK(sc); if ((sc->sc_flags & URTWN_RUNNING) == 0) { URTWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { URTWN_UNLOCK(sc); return (error); } urtwn_start(sc); URTWN_UNLOCK(sc); return (0); } static void urtwn_start(struct urtwn_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { bf = urtwn_getbuf(sc); if (bf == NULL) { mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (urtwn_tx_data(sc, ni, m, bf) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); m_freem(m); ieee80211_free_node(ni); break; } sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); } } static void urtwn_parent(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); if (sc->sc_flags & URTWN_DETACHED) { URTWN_UNLOCK(sc); return; } URTWN_UNLOCK(sc); if (ic->ic_nrunning > 0) { if (urtwn_init(sc) != 0) { struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) ieee80211_stop(vap); } else ieee80211_start_all(ic); } else urtwn_stop(sc); } static __inline int urtwn_power_on(struct urtwn_softc *sc) { return sc->sc_power_on(sc); } static int urtwn_r92c_power_on(struct urtwn_softc *sc) { uint32_t reg; usb_error_t error; int ntries; /* Wait for autoload done bit. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_1(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_PFM_ALDN) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for chip autoload\n"); return (ETIMEDOUT); } /* Unlock ISO/CLK/Power control register. */ error = urtwn_write_1(sc, R92C_RSV_CTRL, 0); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Move SPS into PWM mode. */ error = urtwn_write_1(sc, R92C_SPS0_CTRL, 0x2b); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); urtwn_ms_delay(sc); reg = urtwn_read_1(sc, R92C_LDOV12D_CTRL); if (!(reg & R92C_LDOV12D_CTRL_LDV12_EN)) { error = urtwn_write_1(sc, R92C_LDOV12D_CTRL, reg | R92C_LDOV12D_CTRL_LDV12_EN); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); urtwn_ms_delay(sc); error = urtwn_write_1(sc, R92C_SYS_ISO_CTRL, urtwn_read_1(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_MD2PP); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } /* Auto enable WLAN. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); for (ntries = 0; ntries < 1000; ntries++) { if (!(urtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_ONMAC)) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for MAC auto ON\n"); return (ETIMEDOUT); } /* Enable radio, GPIO and LED functions. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_PDN_EN | R92C_APS_FSMCO_PFM_ALDN); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Release RF digital isolation. 
*/ error = urtwn_write_2(sc, R92C_SYS_ISO_CTRL, urtwn_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Initialize MAC. */ error = urtwn_write_1(sc, R92C_APSD_CTRL, urtwn_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); for (ntries = 0; ntries < 200; ntries++) { if (!(urtwn_read_1(sc, R92C_APSD_CTRL) & R92C_APSD_CTRL_OFF_STATUS)) break; urtwn_ms_delay(sc); } if (ntries == 200) { device_printf(sc->sc_dev, "timeout waiting for MAC initialization\n"); return (ETIMEDOUT); } /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ reg = urtwn_read_2(sc, R92C_CR); reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSEC; error = urtwn_write_2(sc, R92C_CR, reg); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_1(sc, 0xfe10, 0x19); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static int urtwn_r88e_power_on(struct urtwn_softc *sc) { uint32_t reg; usb_error_t error; int ntries; /* Wait for power ready bit. */ for (ntries = 0; ntries < 5000; ntries++) { if (urtwn_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST) break; urtwn_ms_delay(sc); } if (ntries == 5000) { device_printf(sc->sc_dev, "timeout waiting for chip power up\n"); return (ETIMEDOUT); } /* Reset BB. */ error = urtwn_write_1(sc, R92C_SYS_FUNC_EN, urtwn_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 2, urtwn_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Disable HWPDN. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Disable WL suspend. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) & ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); for (ntries = 0; ntries < 5000; ntries++) { if (!(urtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_ONMAC)) break; urtwn_ms_delay(sc); } if (ntries == 5000) return (ETIMEDOUT); /* Enable LDO normal mode. */ error = urtwn_write_1(sc, R92C_LPLDO_CTRL, urtwn_read_1(sc, R92C_LPLDO_CTRL) & ~R92C_LPLDO_CTRL_SLEEP); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ error = urtwn_write_2(sc, R92C_CR, 0); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); reg = urtwn_read_2(sc, R92C_CR); reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | R92C_CR_ENSEC | R92C_CR_CALTMR_EN; error = urtwn_write_2(sc, R92C_CR, reg); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static __inline void urtwn_power_off(struct urtwn_softc *sc) { return sc->sc_power_off(sc); } static void urtwn_r92c_power_off(struct urtwn_softc *sc) { uint32_t reg; /* Block all Tx queues. 
*/ urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); /* Disable RF */ urtwn_rf_write(sc, 0, 0, 0); urtwn_write_1(sc, R92C_APSD_CTRL, R92C_APSD_CTRL_OFF); /* Reset BB state machine */ urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_BB_GLB_RST); urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_USBA); /* * Reset digital sequence */ #ifndef URTWN_WITHOUT_UCODE if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RDY) { /* Reset MCU ready status */ urtwn_write_1(sc, R92C_MCUFWDL, 0); /* If firmware in ram code, do reset */ urtwn_fw_reset(sc); } #endif /* Reset MAC and Enable 8051 */ urtwn_write_1(sc, R92C_SYS_FUNC_EN + 1, (R92C_SYS_FUNC_EN_CPUEN | R92C_SYS_FUNC_EN_ELDR | R92C_SYS_FUNC_EN_HWPDN) >> 8); /* Reset MCU ready status */ urtwn_write_1(sc, R92C_MCUFWDL, 0); /* Disable MAC clock */ urtwn_write_2(sc, R92C_SYS_CLKR, R92C_SYS_CLKR_ANAD16V_EN | R92C_SYS_CLKR_ANA8M | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_80M_SSC_DIS | R92C_SYS_CLKR_SYS_EN | R92C_SYS_CLKR_RING_EN | 0x4000); /* Disable AFE PLL */ urtwn_write_1(sc, R92C_AFE_PLL_CTRL, 0x80); /* Gated AFE DIG_CLOCK */ urtwn_write_2(sc, R92C_AFE_XTAL_CTRL, 0x880F); /* Isolated digital to PON */ urtwn_write_1(sc, R92C_SYS_ISO_CTRL, R92C_SYS_ISO_CTRL_MD2PP | R92C_SYS_ISO_CTRL_PA2PCIE | R92C_SYS_ISO_CTRL_PD2CORE | R92C_SYS_ISO_CTRL_IP2MAC | R92C_SYS_ISO_CTRL_DIOP | R92C_SYS_ISO_CTRL_DIOE); /* * Pull GPIO PIN to balance level and LED control */ /* 1. Disable GPIO[7:0] */ urtwn_write_2(sc, R92C_GPIO_IOSEL, 0x0000); reg = urtwn_read_4(sc, R92C_GPIO_PIN_CTRL) & ~0x0000ff00; reg |= ((reg << 8) & 0x0000ff00) | 0x00ff0000; urtwn_write_4(sc, R92C_GPIO_PIN_CTRL, reg); /* Disable GPIO[10:8] */ urtwn_write_1(sc, R92C_MAC_PINMUX_CFG, 0x00); reg = urtwn_read_2(sc, R92C_GPIO_IO_SEL) & ~0x00f0; reg |= (((reg & 0x000f) << 4) | 0x0780); urtwn_write_2(sc, R92C_GPIO_IO_SEL, reg); /* Disable LED0 & 1 */ urtwn_write_2(sc, R92C_LEDCFG0, 0x8080); /* * Reset digital sequence */ /* Disable ELDR clock */ urtwn_write_2(sc, R92C_SYS_CLKR, R92C_SYS_CLKR_ANAD16V_EN | R92C_SYS_CLKR_ANA8M | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_80M_SSC_DIS | R92C_SYS_CLKR_SYS_EN | R92C_SYS_CLKR_RING_EN | 0x4000); /* Isolated ELDR to PON */ urtwn_write_1(sc, R92C_SYS_ISO_CTRL + 1, (R92C_SYS_ISO_CTRL_DIOR | R92C_SYS_ISO_CTRL_PWC_EV12V) >> 8); /* * Disable analog sequence */ /* Disable A15 power */ urtwn_write_1(sc, R92C_LDOA15_CTRL, R92C_LDOA15_CTRL_OBUF); /* Disable digital core power */ urtwn_write_1(sc, R92C_LDOV12D_CTRL, urtwn_read_1(sc, R92C_LDOV12D_CTRL) & ~R92C_LDOV12D_CTRL_LDV12_EN); /* Enter PFM mode */ urtwn_write_1(sc, R92C_SPS0_CTRL, 0x23); /* Set USB suspend */ urtwn_write_2(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_APDM_HOST | R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_PFM_ALDN); /* Lock ISO/CLK/Power control register. */ urtwn_write_1(sc, R92C_RSV_CTRL, 0x0E); } static void urtwn_r88e_power_off(struct urtwn_softc *sc) { uint8_t reg; int ntries; /* Disable any kind of TX reports. */ urtwn_write_1(sc, R88E_TX_RPT_CTRL, urtwn_read_1(sc, R88E_TX_RPT_CTRL) & ~(R88E_TX_RPT1_ENA | R88E_TX_RPT2_ENA)); /* Stop Rx. */ urtwn_write_1(sc, R92C_CR, 0); /* Move card to Low Power State. */ /* Block all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); for (ntries = 0; ntries < 20; ntries++) { /* Should be zero if no packet is transmitting. 
*/ if (urtwn_read_4(sc, R88E_SCH_TXCMD) == 0) break; urtwn_ms_delay(sc); } if (ntries == 20) { device_printf(sc->sc_dev, "%s: failed to block Tx queues\n", __func__); return; } /* CCK and OFDM are disabled, and clock are gated. */ urtwn_write_1(sc, R92C_SYS_FUNC_EN, urtwn_read_1(sc, R92C_SYS_FUNC_EN) & ~R92C_SYS_FUNC_EN_BBRSTB); urtwn_ms_delay(sc); /* Reset MAC TRX */ urtwn_write_1(sc, R92C_CR, R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN); /* check if removed later */ urtwn_write_1(sc, R92C_CR + 1, urtwn_read_1(sc, R92C_CR + 1) & ~(R92C_CR_ENSEC >> 8)); /* Respond TxOK to scheduler */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, urtwn_read_1(sc, R92C_DUAL_TSF_RST) | 0x20); /* If firmware in ram code, do reset. */ #ifndef URTWN_WITHOUT_UCODE if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RDY) urtwn_r88e_fw_reset(sc); #endif /* Reset MCU ready status. */ urtwn_write_1(sc, R92C_MCUFWDL, 0x00); /* Disable 32k. */ urtwn_write_1(sc, R88E_32K_CTRL, urtwn_read_1(sc, R88E_32K_CTRL) & ~0x01); /* Move card to Disabled state. */ /* Turn off RF. */ urtwn_write_1(sc, R92C_RF_CTRL, 0); /* LDO Sleep mode. */ urtwn_write_1(sc, R92C_LPLDO_CTRL, urtwn_read_1(sc, R92C_LPLDO_CTRL) | R92C_LPLDO_CTRL_SLEEP); /* Turn off MAC by HW state machine */ urtwn_write_1(sc, R92C_APS_FSMCO + 1, urtwn_read_1(sc, R92C_APS_FSMCO + 1) | (R92C_APS_FSMCO_APFM_OFF >> 8)); for (ntries = 0; ntries < 20; ntries++) { /* Wait until it will be disabled. */ if ((urtwn_read_1(sc, R92C_APS_FSMCO + 1) & (R92C_APS_FSMCO_APFM_OFF >> 8)) == 0) break; urtwn_ms_delay(sc); } if (ntries == 20) { device_printf(sc->sc_dev, "%s: could not turn off MAC\n", __func__); return; } /* schmit trigger */ urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 2, urtwn_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); /* Enable WL suspend. */ urtwn_write_1(sc, R92C_APS_FSMCO + 1, (urtwn_read_1(sc, R92C_APS_FSMCO + 1) & ~0x10) | 0x08); /* Enable bandgap mbias in suspend. */ urtwn_write_1(sc, R92C_APS_FSMCO + 3, 0); /* Clear SIC_EN register. */ urtwn_write_1(sc, R92C_GPIO_MUXCFG + 1, urtwn_read_1(sc, R92C_GPIO_MUXCFG + 1) & ~0x10); /* Set USB suspend enable local register */ urtwn_write_1(sc, R92C_USB_SUSPEND, urtwn_read_1(sc, R92C_USB_SUSPEND) | 0x10); /* Reset MCU IO Wrapper. */ reg = urtwn_read_1(sc, R92C_RSV_CTRL + 1); urtwn_write_1(sc, R92C_RSV_CTRL + 1, reg & ~0x08); urtwn_write_1(sc, R92C_RSV_CTRL + 1, reg | 0x08); /* marked as 'For Power Consumption' code. */ urtwn_write_1(sc, R92C_GPIO_OUT, urtwn_read_1(sc, R92C_GPIO_IN)); urtwn_write_1(sc, R92C_GPIO_IOSEL, 0xff); urtwn_write_1(sc, R92C_GPIO_IO_SEL, urtwn_read_1(sc, R92C_GPIO_IO_SEL) << 4); urtwn_write_1(sc, R92C_GPIO_MOD, urtwn_read_1(sc, R92C_GPIO_MOD) | 0x0f); /* Set LNA, TRSW, EX_PA Pin to output mode. */ urtwn_write_4(sc, R88E_BB_PAD_CTRL, 0x00080808); } static int urtwn_llt_init(struct urtwn_softc *sc) { int i, error, page_count, pktbuf_count; page_count = (sc->chip & URTWN_CHIP_88E) ? R88E_TX_PAGE_COUNT : R92C_TX_PAGE_COUNT; pktbuf_count = (sc->chip & URTWN_CHIP_88E) ? R88E_TXPKTBUF_COUNT : R92C_TXPKTBUF_COUNT; /* Reserve pages [0; page_count]. */ for (i = 0; i < page_count; i++) { if ((error = urtwn_llt_write(sc, i, i + 1)) != 0) return (error); } /* NB: 0xff indicates end-of-list. */ if ((error = urtwn_llt_write(sc, i, 0xff)) != 0) return (error); /* * Use pages [page_count + 1; pktbuf_count - 1] * as ring buffer. 
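 * Resulting link table, as a sketch: entries 0 .. page_count-1 each point at
 * the next page, entry page_count is terminated with 0xff, the loop below
 * chains page_count+1 .. pktbuf_count-2 in the same way, and the final write
 * closes the ring by pointing entry pktbuf_count-1 back at page_count+1.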
*/ for (++i; i < pktbuf_count - 1; i++) { if ((error = urtwn_llt_write(sc, i, i + 1)) != 0) return (error); } /* Make the last page point to the beginning of the ring buffer. */ error = urtwn_llt_write(sc, i, page_count + 1); return (error); } #ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *sc) { uint16_t reg; int ntries; /* Tell 8051 to reset itself. */ urtwn_write_1(sc, R92C_HMETFR + 3, 0x20); /* Wait until 8051 resets by itself. */ for (ntries = 0; ntries < 100; ntries++) { reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); if (!(reg & R92C_SYS_FUNC_EN_CPUEN)) return; urtwn_ms_delay(sc); } /* Force 8051 reset. */ urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN); } static void urtwn_r88e_fw_reset(struct urtwn_softc *sc) { uint16_t reg; reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN); urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg | R92C_SYS_FUNC_EN_CPUEN); } static int urtwn_fw_loadpage(struct urtwn_softc *sc, int page, const uint8_t *buf, int len) { uint32_t reg; usb_error_t error = USB_ERR_NORMAL_COMPLETION; int off, mlen; reg = urtwn_read_4(sc, R92C_MCUFWDL); reg = RW(reg, R92C_MCUFWDL_PAGE, page); urtwn_write_4(sc, R92C_MCUFWDL, reg); off = R92C_FW_START_ADDR; while (len > 0) { if (len > 196) mlen = 196; else if (len > 4) mlen = 4; else mlen = 1; /* XXX fix this deconst */ error = urtwn_write_region_1(sc, off, __DECONST(uint8_t *, buf), mlen); if (error != USB_ERR_NORMAL_COMPLETION) break; off += mlen; buf += mlen; len -= mlen; } return (error); } static int urtwn_load_firmware(struct urtwn_softc *sc) { const struct firmware *fw; const struct r92c_fw_hdr *hdr; const char *imagename; const u_char *ptr; size_t len; uint32_t reg; int mlen, ntries, page, error; URTWN_UNLOCK(sc); /* Read firmware image from the filesystem. */ if (sc->chip & URTWN_CHIP_88E) imagename = "urtwn-rtl8188eufw"; else if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) == URTWN_CHIP_UMC_A_CUT) imagename = "urtwn-rtl8192cfwU"; else imagename = "urtwn-rtl8192cfwT"; fw = firmware_get(imagename); URTWN_LOCK(sc); if (fw == NULL) { device_printf(sc->sc_dev, "failed loadfirmware of file %s\n", imagename); return (ENOENT); } len = fw->datasize; if (len < sizeof(*hdr)) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } ptr = fw->data; hdr = (const struct r92c_fw_hdr *)ptr; /* Check if there is a valid FW header and skip it. */ if ((le16toh(hdr->signature) >> 4) == 0x88c || (le16toh(hdr->signature) >> 4) == 0x88e || (le16toh(hdr->signature) >> 4) == 0x92c) { URTWN_DPRINTF(sc, URTWN_DEBUG_FIRMWARE, "FW V%d.%d %02d-%02d %02d:%02d\n", le16toh(hdr->version), le16toh(hdr->subversion), hdr->month, hdr->date, hdr->hour, hdr->minute); ptr += sizeof(*hdr); len -= sizeof(*hdr); } if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) { if (sc->chip & URTWN_CHIP_88E) urtwn_r88e_fw_reset(sc); else urtwn_fw_reset(sc); urtwn_write_1(sc, R92C_MCUFWDL, 0); } if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_2(sc, R92C_SYS_FUNC_EN, urtwn_read_2(sc, R92C_SYS_FUNC_EN) | R92C_SYS_FUNC_EN_CPUEN); } urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_EN); urtwn_write_1(sc, R92C_MCUFWDL + 2, urtwn_read_1(sc, R92C_MCUFWDL + 2) & ~0x08); /* Reset the FWDL checksum. 
*/ urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_CHKSUM_RPT); for (page = 0; len > 0; page++) { mlen = min(len, R92C_FW_PAGE_SIZE); error = urtwn_fw_loadpage(sc, page, ptr, mlen); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware page\n"); goto fail; } ptr += mlen; len -= mlen; } urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) & ~R92C_MCUFWDL_EN); urtwn_write_1(sc, R92C_MCUFWDL + 1, 0); /* Wait for checksum report. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_CHKSUM_RPT) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for checksum report\n"); error = ETIMEDOUT; goto fail; } reg = urtwn_read_4(sc, R92C_MCUFWDL); reg = (reg & ~R92C_MCUFWDL_WINTINI_RDY) | R92C_MCUFWDL_RDY; urtwn_write_4(sc, R92C_MCUFWDL, reg); if (sc->chip & URTWN_CHIP_88E) urtwn_r88e_fw_reset(sc); /* Wait for firmware readiness. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_WINTINI_RDY) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for firmware readiness\n"); error = ETIMEDOUT; goto fail; } fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } #endif static int urtwn_dma_init(struct urtwn_softc *sc) { struct usb_endpoint *ep, *ep_end; usb_error_t usb_err; uint32_t reg; int hashq, hasnq, haslq, nqueues, ntx; int error, pagecount, npubqpages, nqpages, nrempages, tx_boundary; /* Initialize LLT table. */ error = urtwn_llt_init(sc); if (error != 0) return (error); /* Determine the number of bulk-out pipes. */ ntx = 0; ep = sc->sc_udev->endpoints; ep_end = sc->sc_udev->endpoints + sc->sc_udev->endpoints_max; for (; ep != ep_end; ep++) { if ((ep->edesc == NULL) || (ep->iface_index != sc->sc_iface_index)) continue; if (UE_GET_DIR(ep->edesc->bEndpointAddress) == UE_DIR_OUT) ntx++; } if (ntx == 0) { device_printf(sc->sc_dev, "%d: invalid number of Tx bulk pipes\n", ntx); return (EIO); } /* Get Tx queues to USB endpoints mapping. */ hashq = hasnq = haslq = nqueues = 0; switch (ntx) { case 1: hashq = 1; break; case 2: hashq = hasnq = 1; break; case 3: case 4: hashq = hasnq = haslq = 1; break; } nqueues = hashq + hasnq + haslq; if (nqueues == 0) return (EIO); npubqpages = nqpages = nrempages = pagecount = 0; if (sc->chip & URTWN_CHIP_88E) tx_boundary = R88E_TX_PAGE_BOUNDARY; else { pagecount = R92C_TX_PAGE_COUNT; npubqpages = R92C_PUBQ_NPAGES; tx_boundary = R92C_TX_PAGE_BOUNDARY; } /* Set number of pages for normal priority queue. */ if (sc->chip & URTWN_CHIP_88E) { usb_err = urtwn_write_2(sc, R92C_RQPN_NPQ, 0xd); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_4(sc, R92C_RQPN, 0x808e000d); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); } else { /* Get the number of pages for each queue. */ nqpages = (pagecount - npubqpages) / nqueues; /* * The remaining pages are assigned to the high priority * queue. */ nrempages = (pagecount - npubqpages) % nqueues; usb_err = urtwn_write_1(sc, R92C_RQPN_NPQ, hasnq ? nqpages : 0); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_4(sc, R92C_RQPN, /* Set number of pages for public queue. */ SM(R92C_RQPN_PUBQ, npubqpages) | /* Set number of pages for high priority queue. */ SM(R92C_RQPN_HPQ, hashq ? nqpages + nrempages : 0) | /* Set number of pages for low priority queue. */ SM(R92C_RQPN_LPQ, haslq ? nqpages : 0) | /* Load values. 
*/ R92C_RQPN_LD); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); } usb_err = urtwn_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TRXFF_BNDY, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TDECTRL + 1, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Set queue to USB pipe mapping. */ reg = urtwn_read_2(sc, R92C_TRXDMA_CTRL); reg &= ~R92C_TRXDMA_CTRL_QMAP_M; if (nqueues == 1) { if (hashq) reg |= R92C_TRXDMA_CTRL_QMAP_HQ; else if (hasnq) reg |= R92C_TRXDMA_CTRL_QMAP_NQ; else reg |= R92C_TRXDMA_CTRL_QMAP_LQ; } else if (nqueues == 2) { /* * All 2-endpoints configs have high and normal * priority queues. */ reg |= R92C_TRXDMA_CTRL_QMAP_HQ_NQ; } else reg |= R92C_TRXDMA_CTRL_QMAP_3EP; usb_err = urtwn_write_2(sc, R92C_TRXDMA_CTRL, reg); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Set Tx/Rx transfer page boundary. */ usb_err = urtwn_write_2(sc, R92C_TRXFF_BNDY + 2, (sc->chip & URTWN_CHIP_88E) ? 0x23ff : 0x27ff); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Set Tx/Rx transfer page size. */ usb_err = urtwn_write_1(sc, R92C_PBP, SM(R92C_PBP_PSRX, R92C_PBP_128) | SM(R92C_PBP_PSTX, R92C_PBP_128)); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static int urtwn_mac_init(struct urtwn_softc *sc) { usb_error_t error; int i; /* Write MAC initialization values. */ if (sc->chip & URTWN_CHIP_88E) { for (i = 0; i < nitems(rtl8188eu_mac); i++) { error = urtwn_write_1(sc, rtl8188eu_mac[i].reg, rtl8188eu_mac[i].val); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } urtwn_write_1(sc, R92C_MAX_AGGR_NUM, 0x07); } else { for (i = 0; i < nitems(rtl8192cu_mac); i++) error = urtwn_write_1(sc, rtl8192cu_mac[i].reg, rtl8192cu_mac[i].val); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } return (0); } static void urtwn_bb_init(struct urtwn_softc *sc) { const struct urtwn_bb_prog *prog; uint32_t reg; uint8_t crystalcap; int i; /* Enable BB and RF. */ urtwn_write_2(sc, R92C_SYS_FUNC_EN, urtwn_read_2(sc, R92C_SYS_FUNC_EN) | R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_DIO_RF); if (!(sc->chip & URTWN_CHIP_88E)) urtwn_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83); urtwn_write_1(sc, R92C_RF_CTRL, R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_BBRSTB); if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_1(sc, R92C_LDOHCI12_CTRL, 0x0f); urtwn_write_1(sc, 0x15, 0xe9); urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80); } /* Select BB programming based on board type. */ if (sc->chip & URTWN_CHIP_88E) prog = &rtl8188eu_bb_prog; else if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = &rtl8188ce_bb_prog; else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) prog = &rtl8188ru_bb_prog; else prog = &rtl8188cu_bb_prog; } else { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = &rtl8192ce_bb_prog; else prog = &rtl8192cu_bb_prog; } /* Write BB initialization values. 
*/ for (i = 0; i < prog->count; i++) { urtwn_bb_write(sc, prog->regs[i], prog->vals[i]); urtwn_ms_delay(sc); } if (sc->chip & URTWN_CHIP_92C_1T2R) { /* 8192C 1T only configuration. */ reg = urtwn_bb_read(sc, R92C_FPGA0_TXINFO); reg = (reg & ~0x00000003) | 0x2; urtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg); reg = urtwn_bb_read(sc, R92C_FPGA1_TXINFO); reg = (reg & ~0x00300033) | 0x00200022; urtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg); reg = urtwn_bb_read(sc, R92C_CCK0_AFESETTING); reg = (reg & ~0xff000000) | 0x45 << 24; urtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg); reg = urtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA); reg = (reg & ~0x000000ff) | 0x23; urtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg); reg = urtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1); reg = (reg & ~0x00000030) | 1 << 4; urtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg); reg = urtwn_bb_read(sc, 0xe74); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe74, reg); reg = urtwn_bb_read(sc, 0xe78); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe78, reg); reg = urtwn_bb_read(sc, 0xe7c); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe7c, reg); reg = urtwn_bb_read(sc, 0xe80); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe80, reg); reg = urtwn_bb_read(sc, 0xe88); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe88, reg); } /* Write AGC values. */ for (i = 0; i < prog->agccount; i++) { urtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, prog->agcvals[i]); urtwn_ms_delay(sc); } if (sc->chip & URTWN_CHIP_88E) { urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x69553422); urtwn_ms_delay(sc); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x69553420); urtwn_ms_delay(sc); crystalcap = sc->rom.r88e_rom.crystalcap; if (crystalcap == 0xff) crystalcap = 0x20; crystalcap &= 0x3f; reg = urtwn_bb_read(sc, R92C_AFE_XTAL_CTRL); urtwn_bb_write(sc, R92C_AFE_XTAL_CTRL, RW(reg, R92C_AFE_XTAL_CTRL_ADDR, crystalcap | crystalcap << 6)); } else { if (urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR) sc->sc_flags |= URTWN_FLAG_CCK_HIPWR; } } static void urtwn_rf_init(struct urtwn_softc *sc) { const struct urtwn_rf_prog *prog; uint32_t reg, type; int i, j, idx, off; /* Select RF programming based on board type. */ if (sc->chip & URTWN_CHIP_88E) prog = rtl8188eu_rf_prog; else if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = rtl8188ce_rf_prog; else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) prog = rtl8188ru_rf_prog; else prog = rtl8188cu_rf_prog; } else prog = rtl8192ce_rf_prog; for (i = 0; i < sc->nrxchains; i++) { /* Save RF_ENV control type. */ idx = i / 2; off = (i % 2) * 16; reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); type = (reg >> off) & 0x10; /* Set RF_ENV enable. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i)); reg |= 0x100000; urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg); urtwn_ms_delay(sc); /* Set RF_ENV output high. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i)); reg |= 0x10; urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg); urtwn_ms_delay(sc); /* Set address and data lengths of RF registers. */ reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i)); reg &= ~R92C_HSSI_PARAM2_ADDR_LENGTH; urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg); urtwn_ms_delay(sc); reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i)); reg &= ~R92C_HSSI_PARAM2_DATA_LENGTH; urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg); urtwn_ms_delay(sc); /* Write RF initialization values for this chain. 
*/ for (j = 0; j < prog[i].count; j++) { if (prog[i].regs[j] >= 0xf9 && prog[i].regs[j] <= 0xfe) { /* * These are fake RF registers offsets that * indicate a delay is required. */ usb_pause_mtx(&sc->sc_mtx, hz / 20); /* 50ms */ continue; } urtwn_rf_write(sc, i, prog[i].regs[j], prog[i].vals[j]); urtwn_ms_delay(sc); } /* Restore RF_ENV control type. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); reg &= ~(0x10 << off) | (type << off); urtwn_bb_write(sc, R92C_FPGA0_RFIFACESW(idx), reg); /* Cache RF register CHNLBW. */ sc->rf_chnlbw[i] = urtwn_rf_read(sc, i, R92C_RF_CHNLBW); } if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) == URTWN_CHIP_UMC_A_CUT) { urtwn_rf_write(sc, 0, R92C_RF_RX_G1, 0x30255); urtwn_rf_write(sc, 0, R92C_RF_RX_G2, 0x50a00); } } static void urtwn_cam_init(struct urtwn_softc *sc) { /* Invalidate all CAM entries. */ urtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_CLR); } static int urtwn_cam_write(struct urtwn_softc *sc, uint32_t addr, uint32_t data) { usb_error_t error; error = urtwn_write_4(sc, R92C_CAMWRITE, data); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_WRITE | SM(R92C_CAMCMD_ADDR, addr)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static void urtwn_pa_bias_init(struct urtwn_softc *sc) { uint8_t reg; int i; for (i = 0; i < sc->nrxchains; i++) { if (sc->pa_setting & (1 << i)) continue; urtwn_rf_write(sc, i, R92C_RF_IPA, 0x0f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0x4f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0x8f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0xcf406); } if (!(sc->pa_setting & 0x10)) { reg = urtwn_read_1(sc, 0x16); reg = (reg & ~0xf0) | 0x90; urtwn_write_1(sc, 0x16, reg); } } static void urtwn_rxfilter_init(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t rcr; uint16_t filter; URTWN_ASSERT_LOCKED(sc); /* Accept all multicast frames. */ urtwn_write_4(sc, R92C_MAR + 0, 0xffffffff); urtwn_write_4(sc, R92C_MAR + 4, 0xffffffff); /* Filter for management frames. */ filter = 0x7f3f; switch (vap->iv_opmode) { case IEEE80211_M_STA: filter &= ~( R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_ASSOC_REQ) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_REASSOC_REQ) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_PROBE_REQ)); break; case IEEE80211_M_HOSTAP: filter &= ~( R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_ASSOC_RESP) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_REASSOC_RESP) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_BEACON)); break; case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: break; default: device_printf(sc->sc_dev, "%s: undefined opmode %d\n", __func__, vap->iv_opmode); break; } urtwn_write_2(sc, R92C_RXFLTMAP0, filter); /* Reject all control frames. */ urtwn_write_2(sc, R92C_RXFLTMAP1, 0x0000); /* Reject all data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0x0000); rcr = R92C_RCR_AM | R92C_RCR_AB | R92C_RCR_APM | R92C_RCR_HTC_LOC_CTRL | R92C_RCR_APP_PHYSTS | R92C_RCR_APP_ICV | R92C_RCR_APP_MIC; if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Accept all frames. */ rcr |= R92C_RCR_ACF | R92C_RCR_ADF | R92C_RCR_AMF | R92C_RCR_AAP; } /* Set Rx filter. */ urtwn_write_4(sc, R92C_RCR, rcr); if (ic->ic_promisc != 0) { /* Update Rx filter. 
*/ urtwn_set_promisc(sc); } } static void urtwn_edca_init(struct urtwn_softc *sc) { urtwn_write_2(sc, R92C_SPEC_SIFS, 0x100a); urtwn_write_2(sc, R92C_MAC_SPEC_SIFS, 0x100a); urtwn_write_2(sc, R92C_SIFS_CCK, 0x100a); urtwn_write_2(sc, R92C_SIFS_OFDM, 0x100a); urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x005ea42b); urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a44f); urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005ea324); urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002fa226); } static void urtwn_write_txpower(struct urtwn_softc *sc, int chain, uint16_t power[URTWN_RIDX_COUNT]) { uint32_t reg; /* Write per-CCK rate Tx power. */ if (chain == 0) { reg = urtwn_bb_read(sc, R92C_TXAGC_A_CCK1_MCS32); reg = RW(reg, R92C_TXAGC_A_CCK1, power[0]); urtwn_bb_write(sc, R92C_TXAGC_A_CCK1_MCS32, reg); reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); reg = RW(reg, R92C_TXAGC_A_CCK2, power[1]); reg = RW(reg, R92C_TXAGC_A_CCK55, power[2]); reg = RW(reg, R92C_TXAGC_A_CCK11, power[3]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); } else { reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK1_55_MCS32); reg = RW(reg, R92C_TXAGC_B_CCK1, power[0]); reg = RW(reg, R92C_TXAGC_B_CCK2, power[1]); reg = RW(reg, R92C_TXAGC_B_CCK55, power[2]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK1_55_MCS32, reg); reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); reg = RW(reg, R92C_TXAGC_B_CCK11, power[3]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); } /* Write per-OFDM rate Tx power. */ urtwn_bb_write(sc, R92C_TXAGC_RATE18_06(chain), SM(R92C_TXAGC_RATE06, power[ 4]) | SM(R92C_TXAGC_RATE09, power[ 5]) | SM(R92C_TXAGC_RATE12, power[ 6]) | SM(R92C_TXAGC_RATE18, power[ 7])); urtwn_bb_write(sc, R92C_TXAGC_RATE54_24(chain), SM(R92C_TXAGC_RATE24, power[ 8]) | SM(R92C_TXAGC_RATE36, power[ 9]) | SM(R92C_TXAGC_RATE48, power[10]) | SM(R92C_TXAGC_RATE54, power[11])); /* Write per-MCS Tx power. */ urtwn_bb_write(sc, R92C_TXAGC_MCS03_MCS00(chain), SM(R92C_TXAGC_MCS00, power[12]) | SM(R92C_TXAGC_MCS01, power[13]) | SM(R92C_TXAGC_MCS02, power[14]) | SM(R92C_TXAGC_MCS03, power[15])); urtwn_bb_write(sc, R92C_TXAGC_MCS07_MCS04(chain), SM(R92C_TXAGC_MCS04, power[16]) | SM(R92C_TXAGC_MCS05, power[17]) | SM(R92C_TXAGC_MCS06, power[18]) | SM(R92C_TXAGC_MCS07, power[19])); urtwn_bb_write(sc, R92C_TXAGC_MCS11_MCS08(chain), SM(R92C_TXAGC_MCS08, power[20]) | SM(R92C_TXAGC_MCS09, power[21]) | SM(R92C_TXAGC_MCS10, power[22]) | SM(R92C_TXAGC_MCS11, power[23])); urtwn_bb_write(sc, R92C_TXAGC_MCS15_MCS12(chain), SM(R92C_TXAGC_MCS12, power[24]) | SM(R92C_TXAGC_MCS13, power[25]) | SM(R92C_TXAGC_MCS14, power[26]) | SM(R92C_TXAGC_MCS15, power[27])); } static void urtwn_get_txpower(struct urtwn_softc *sc, int chain, struct ieee80211_channel *c, struct ieee80211_channel *extc, uint16_t power[URTWN_RIDX_COUNT]) { struct ieee80211com *ic = &sc->sc_ic; struct r92c_rom *rom = &sc->rom.r92c_rom; uint16_t cckpow, ofdmpow, htpow, diff, max; const struct urtwn_txpwr *base; int ridx, chan, group; /* Determine channel group. */ chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan <= 3) group = 0; else if (chan <= 9) group = 1; else group = 2; /* Get original Tx power based on board type and RF chain. 
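	 *
	 * Note: only R92C_BOARD_TYPE_HIGHPA boards get the dedicated
	 * rtl8188ru_txagc table below; every other board type,
	 * including the 8192C based ones, falls back to the
	 * rtl8192cu_txagc table.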
*/ if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) base = &rtl8188ru_txagc[chain]; else base = &rtl8192cu_txagc[chain]; } else base = &rtl8192cu_txagc[chain]; memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0])); if (sc->regulatory == 0) { for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) power[ridx] = base->pwr[0][ridx]; } for (ridx = URTWN_RIDX_OFDM6; ridx < URTWN_RIDX_COUNT; ridx++) { if (sc->regulatory == 3) { power[ridx] = base->pwr[0][ridx]; /* Apply vendor limits. */ if (extc != NULL) max = rom->ht40_max_pwr[group]; else max = rom->ht20_max_pwr[group]; max = (max >> (chain * 4)) & 0xf; if (power[ridx] > max) power[ridx] = max; } else if (sc->regulatory == 1) { if (extc == NULL) power[ridx] = base->pwr[group][ridx]; } else if (sc->regulatory != 2) power[ridx] = base->pwr[0][ridx]; } /* Compute per-CCK rate Tx power. */ cckpow = rom->cck_tx_pwr[chain][group]; for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) { power[ridx] += cckpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } htpow = rom->ht40_1s_tx_pwr[chain][group]; if (sc->ntxchains > 1) { /* Apply reduction for 2 spatial streams. */ diff = rom->ht40_2s_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; htpow = (htpow > diff) ? htpow - diff : 0; } /* Compute per-OFDM rate Tx power. */ diff = rom->ofdm_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; ofdmpow = htpow + diff; /* HT->OFDM correction. */ for (ridx = URTWN_RIDX_OFDM6; ridx <= URTWN_RIDX_OFDM54; ridx++) { power[ridx] += ofdmpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } /* Compute per-MCS Tx power. */ if (extc == NULL) { diff = rom->ht20_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; htpow += diff; /* HT40->HT20 correction. */ } for (ridx = 12; ridx <= 27; ridx++) { power[ridx] += htpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } #ifdef USB_DEBUG if (sc->sc_debug & URTWN_DEBUG_TXPWR) { /* Dump per-rate Tx power values. */ printf("Tx power for chain %d:\n", chain); for (ridx = URTWN_RIDX_CCK1; ridx < URTWN_RIDX_COUNT; ridx++) printf("Rate %d = %u\n", ridx, power[ridx]); } #endif } static void urtwn_r88e_get_txpower(struct urtwn_softc *sc, int chain, struct ieee80211_channel *c, struct ieee80211_channel *extc, uint16_t power[URTWN_RIDX_COUNT]) { struct ieee80211com *ic = &sc->sc_ic; struct r88e_rom *rom = &sc->rom.r88e_rom; uint16_t cckpow, ofdmpow, bw20pow, htpow; const struct urtwn_r88e_txpwr *base; int ridx, chan, group; /* Determine channel group. */ chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan <= 2) group = 0; else if (chan <= 5) group = 1; else if (chan <= 8) group = 2; else if (chan <= 11) group = 3; else if (chan <= 13) group = 4; else group = 5; /* Get original Tx power based on board type and RF chain. */ base = &rtl8188eu_txagc[chain]; memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0])); if (sc->regulatory == 0) { for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) power[ridx] = base->pwr[0][ridx]; } for (ridx = URTWN_RIDX_OFDM6; ridx < URTWN_RIDX_COUNT; ridx++) { if (sc->regulatory == 3) power[ridx] = base->pwr[0][ridx]; else if (sc->regulatory == 1) { if (extc == NULL) power[ridx] = base->pwr[group][ridx]; } else if (sc->regulatory != 2) power[ridx] = base->pwr[0][ridx]; } /* Compute per-CCK rate Tx power. 
*/ cckpow = rom->cck_tx_pwr[group]; for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) { power[ridx] += cckpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } htpow = rom->ht40_tx_pwr[group]; /* Compute per-OFDM rate Tx power. */ ofdmpow = htpow + sc->ofdm_tx_pwr_diff; for (ridx = URTWN_RIDX_OFDM6; ridx <= URTWN_RIDX_OFDM54; ridx++) { power[ridx] += ofdmpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } bw20pow = htpow + sc->bw20_tx_pwr_diff; for (ridx = 12; ridx <= 27; ridx++) { power[ridx] += bw20pow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } } static void urtwn_set_txpower(struct urtwn_softc *sc, struct ieee80211_channel *c, struct ieee80211_channel *extc) { uint16_t power[URTWN_RIDX_COUNT]; int i; for (i = 0; i < sc->ntxchains; i++) { /* Compute per-rate Tx power values. */ if (sc->chip & URTWN_CHIP_88E) urtwn_r88e_get_txpower(sc, i, c, extc, power); else urtwn_get_txpower(sc, i, c, extc, power); /* Write per-rate Tx power values to hardware. */ urtwn_write_txpower(sc, i, power); } } static void urtwn_set_rx_bssid_all(struct urtwn_softc *sc, int enable) { uint32_t reg; reg = urtwn_read_4(sc, R92C_RCR); if (enable) reg &= ~R92C_RCR_CBSSID_BCN; else reg |= R92C_RCR_CBSSID_BCN; urtwn_write_4(sc, R92C_RCR, reg); } static void urtwn_set_gain(struct urtwn_softc *sc, uint8_t gain) { uint32_t reg; reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, gain); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); if (!(sc->chip & URTWN_CHIP_88E)) { reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, gain); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); } } static void urtwn_scan_start(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); /* Receive beacons / probe responses from any BSSID. */ if (ic->ic_opmode != IEEE80211_M_IBSS) urtwn_set_rx_bssid_all(sc, 1); /* Set gain for scanning. */ urtwn_set_gain(sc, 0x20); URTWN_UNLOCK(sc); } static void urtwn_scan_end(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); /* Restore limitations. */ if (ic->ic_promisc == 0 && ic->ic_opmode != IEEE80211_M_IBSS) urtwn_set_rx_bssid_all(sc, 0); /* Set gain under link. */ urtwn_set_gain(sc, 0x32); URTWN_UNLOCK(sc); } static void urtwn_set_channel(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; struct ieee80211_channel *c = ic->ic_curchan; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); URTWN_LOCK(sc); if (vap->iv_state == IEEE80211_S_SCAN) { /* Make link LED blink during scan. */ urtwn_set_led(sc, URTWN_LED_LINK, !sc->ledlink); } urtwn_set_chan(sc, c, NULL); sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); URTWN_UNLOCK(sc); } static int urtwn_wme_update(struct ieee80211com *ic) { const struct wmeParams *wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; struct urtwn_softc *sc = ic->ic_softc; uint8_t aifs, acm, slottime; int ac; acm = 0; slottime = IEEE80211_GET_SLOTTIME(ic); URTWN_LOCK(sc); for (ac = WME_AC_BE; ac < WME_NUM_AC; ac++) { /* AIFS[AC] = AIFSN[AC] * aSlotTime + aSIFSTime. 
*/ aifs = wmep[ac].wmep_aifsn * slottime + IEEE80211_DUR_SIFS; urtwn_write_4(sc, wme2queue[ac].reg, SM(R92C_EDCA_PARAM_TXOP, wmep[ac].wmep_txopLimit) | SM(R92C_EDCA_PARAM_ECWMIN, wmep[ac].wmep_logcwmin) | SM(R92C_EDCA_PARAM_ECWMAX, wmep[ac].wmep_logcwmax) | SM(R92C_EDCA_PARAM_AIFS, aifs)); if (ac != WME_AC_BE) acm |= wmep[ac].wmep_acm << ac; } if (acm != 0) acm |= R92C_ACMHWCTRL_EN; urtwn_write_1(sc, R92C_ACMHWCTRL, (urtwn_read_1(sc, R92C_ACMHWCTRL) & ~R92C_ACMHWCTRL_ACM_MASK) | acm); URTWN_UNLOCK(sc); return 0; } static void urtwn_update_slot(struct ieee80211com *ic) { urtwn_cmd_sleepable(ic->ic_softc, NULL, 0, urtwn_update_slot_cb); } static void urtwn_update_slot_cb(struct urtwn_softc *sc, union sec_param *data) { struct ieee80211com *ic = &sc->sc_ic; uint8_t slottime; slottime = IEEE80211_GET_SLOTTIME(ic); URTWN_DPRINTF(sc, URTWN_DEBUG_ANY, "%s: setting slot time to %uus\n", __func__, slottime); urtwn_write_1(sc, R92C_SLOT, slottime); urtwn_update_aifs(sc, slottime); } static void urtwn_update_aifs(struct urtwn_softc *sc, uint8_t slottime) { const struct wmeParams *wmep = sc->sc_ic.ic_wme.wme_chanParams.cap_wmeParams; uint8_t aifs, ac; for (ac = WME_AC_BE; ac < WME_NUM_AC; ac++) { /* AIFS[AC] = AIFSN[AC] * aSlotTime + aSIFSTime. */ aifs = wmep[ac].wmep_aifsn * slottime + IEEE80211_DUR_SIFS; urtwn_write_1(sc, wme2queue[ac].reg, aifs); } } static void urtwn_set_promisc(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t rcr, mask1, mask2; URTWN_ASSERT_LOCKED(sc); if (vap->iv_opmode == IEEE80211_M_MONITOR) return; mask1 = R92C_RCR_ACF | R92C_RCR_ADF | R92C_RCR_AMF | R92C_RCR_AAP; mask2 = R92C_RCR_APM; if (vap->iv_state == IEEE80211_S_RUN) { switch (vap->iv_opmode) { case IEEE80211_M_STA: mask2 |= R92C_RCR_CBSSID_DATA; /* FALLTHROUGH */ case IEEE80211_M_HOSTAP: mask2 |= R92C_RCR_CBSSID_BCN; break; case IEEE80211_M_IBSS: mask2 |= R92C_RCR_CBSSID_DATA; break; default: device_printf(sc->sc_dev, "%s: undefined opmode %d\n", __func__, vap->iv_opmode); return; } } rcr = urtwn_read_4(sc, R92C_RCR); if (ic->ic_promisc == 0) rcr = (rcr & ~mask1) | mask2; else rcr = (rcr & ~mask2) | mask1; urtwn_write_4(sc, R92C_RCR, rcr); } static void urtwn_update_promisc(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); if (sc->sc_flags & URTWN_RUNNING) urtwn_set_promisc(sc); URTWN_UNLOCK(sc); } static void urtwn_update_mcast(struct ieee80211com *ic) { /* XXX do nothing? 
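	 *
	 * urtwn_rxfilter_init() already programs both halves of
	 * R92C_MAR to 0xffffffff, i.e. every multicast group is
	 * accepted, so there is no per-group hardware filter state
	 * to recompute here.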
*/ } static struct ieee80211_node * urtwn_r88e_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct urtwn_node *un; un = malloc(sizeof (struct urtwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (un == NULL) return NULL; un->id = URTWN_MACID_UNDEFINED; return &un->ni; } static void urtwn_r88e_newassoc(struct ieee80211_node *ni, int isnew) { struct urtwn_softc *sc = ni->ni_ic->ic_softc; struct urtwn_node *un = URTWN_NODE(ni); uint8_t id; if (!isnew) return; URTWN_NT_LOCK(sc); for (id = 0; id <= URTWN_MACID_MAX(sc); id++) { if (id != URTWN_MACID_BC && sc->node_list[id] == NULL) { un->id = id; sc->node_list[id] = ni; break; } } URTWN_NT_UNLOCK(sc); if (id > URTWN_MACID_MAX(sc)) { device_printf(sc->sc_dev, "%s: node table is full\n", __func__); } } static void urtwn_r88e_node_free(struct ieee80211_node *ni) { struct urtwn_softc *sc = ni->ni_ic->ic_softc; struct urtwn_node *un = URTWN_NODE(ni); URTWN_NT_LOCK(sc); if (un->id != URTWN_MACID_UNDEFINED) sc->node_list[un->id] = NULL; URTWN_NT_UNLOCK(sc); sc->sc_node_free(ni); } static void urtwn_set_chan(struct urtwn_softc *sc, struct ieee80211_channel *c, struct ieee80211_channel *extc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t reg; u_int chan; int i; chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan == 0 || chan == IEEE80211_CHAN_ANY) { device_printf(sc->sc_dev, "%s: invalid channel %x\n", __func__, chan); return; } /* Set Tx power for this new channel. */ urtwn_set_txpower(sc, c, extc); for (i = 0; i < sc->nrxchains; i++) { urtwn_rf_write(sc, i, R92C_RF_CHNLBW, RW(sc->rf_chnlbw[i], R92C_RF_CHNLBW_CHNL, chan)); } #ifndef IEEE80211_NO_HT if (extc != NULL) { /* Is secondary channel below or above primary? */ int prichlo = c->ic_freq < extc->ic_freq; urtwn_write_1(sc, R92C_BWOPMODE, urtwn_read_1(sc, R92C_BWOPMODE) & ~R92C_BWOPMODE_20MHZ); reg = urtwn_read_1(sc, R92C_RRSR + 2); reg = (reg & ~0x6f) | (prichlo ? 1 : 2) << 5; urtwn_write_1(sc, R92C_RRSR + 2, reg); urtwn_bb_write(sc, R92C_FPGA0_RFMOD, urtwn_bb_read(sc, R92C_FPGA0_RFMOD) | R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA1_RFMOD, urtwn_bb_read(sc, R92C_FPGA1_RFMOD) | R92C_RFMOD_40MHZ); /* Set CCK side band. */ reg = urtwn_bb_read(sc, R92C_CCK0_SYSTEM); reg = (reg & ~0x00000010) | (prichlo ? 0 : 1) << 4; urtwn_bb_write(sc, R92C_CCK0_SYSTEM, reg); reg = urtwn_bb_read(sc, R92C_OFDM1_LSTF); reg = (reg & ~0x00000c00) | (prichlo ? 1 : 2) << 10; urtwn_bb_write(sc, R92C_OFDM1_LSTF, reg); urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2, urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) & ~R92C_FPGA0_ANAPARAM2_CBW20); reg = urtwn_bb_read(sc, 0x818); reg = (reg & ~0x0c000000) | (prichlo ? 2 : 1) << 26; urtwn_bb_write(sc, 0x818, reg); /* Select 40MHz bandwidth. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, (sc->rf_chnlbw[0] & ~0xfff) | chan); } else #endif { urtwn_write_1(sc, R92C_BWOPMODE, urtwn_read_1(sc, R92C_BWOPMODE) | R92C_BWOPMODE_20MHZ); urtwn_bb_write(sc, R92C_FPGA0_RFMOD, urtwn_bb_read(sc, R92C_FPGA0_RFMOD) & ~R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA1_RFMOD, urtwn_bb_read(sc, R92C_FPGA1_RFMOD) & ~R92C_RFMOD_40MHZ); if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2, urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) | R92C_FPGA0_ANAPARAM2_CBW20); } /* Select 20MHz bandwidth. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, (sc->rf_chnlbw[0] & ~0xfff) | chan | ((sc->chip & URTWN_CHIP_88E) ? 
R88E_RF_CHNLBW_BW20 : R92C_RF_CHNLBW_BW20)); } } static void urtwn_iq_calib(struct urtwn_softc *sc) { /* TODO */ } static void urtwn_lc_calib(struct urtwn_softc *sc) { uint32_t rf_ac[2]; uint8_t txmode; int i; txmode = urtwn_read_1(sc, R92C_OFDM1_LSTF + 3); if ((txmode & 0x70) != 0) { /* Disable all continuous Tx. */ urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode & ~0x70); /* Set RF mode to standby mode. */ for (i = 0; i < sc->nrxchains; i++) { rf_ac[i] = urtwn_rf_read(sc, i, R92C_RF_AC); urtwn_rf_write(sc, i, R92C_RF_AC, RW(rf_ac[i], R92C_RF_AC_MODE, R92C_RF_AC_MODE_STANDBY)); } } else { /* Block all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); } /* Start calibration. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, urtwn_rf_read(sc, 0, R92C_RF_CHNLBW) | R92C_RF_CHNLBW_LCSTART); /* Give calibration the time to complete. */ usb_pause_mtx(&sc->sc_mtx, hz / 10); /* 100ms */ /* Restore configuration. */ if ((txmode & 0x70) != 0) { /* Restore Tx mode. */ urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode); /* Restore RF mode. */ for (i = 0; i < sc->nrxchains; i++) urtwn_rf_write(sc, i, R92C_RF_AC, rf_ac[i]); } else { /* Unblock all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, 0x00); } } static void urtwn_temp_calib(struct urtwn_softc *sc) { uint8_t temp; URTWN_ASSERT_LOCKED(sc); if (!(sc->sc_flags & URTWN_TEMP_MEASURED)) { /* Start measuring temperature. */ URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: start measuring temperature\n", __func__); if (sc->chip & URTWN_CHIP_88E) { urtwn_rf_write(sc, 0, R88E_RF_T_METER, R88E_RF_T_METER_START); } else { urtwn_rf_write(sc, 0, R92C_RF_T_METER, R92C_RF_T_METER_START); } sc->sc_flags |= URTWN_TEMP_MEASURED; return; } sc->sc_flags &= ~URTWN_TEMP_MEASURED; /* Read measured temperature. */ if (sc->chip & URTWN_CHIP_88E) { temp = MS(urtwn_rf_read(sc, 0, R88E_RF_T_METER), R88E_RF_T_METER_VAL); } else { temp = MS(urtwn_rf_read(sc, 0, R92C_RF_T_METER), R92C_RF_T_METER_VAL); } if (temp == 0) { /* Read failed, skip. */ URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: temperature read failed, skipping\n", __func__); return; } URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: temperature: previous %u, current %u\n", __func__, sc->thcal_lctemp, temp); /* * Redo LC calibration if temperature changed significantly since * last calibration. */ if (sc->thcal_lctemp == 0) { /* First LC calibration is performed in urtwn_init(). */ sc->thcal_lctemp = temp; } else if (abs(temp - sc->thcal_lctemp) > 1) { URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: LC calib triggered by temp: %u -> %u\n", __func__, sc->thcal_lctemp, temp); urtwn_lc_calib(sc); /* Record temperature of last LC calibration. */ sc->thcal_lctemp = temp; } } static int urtwn_init(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint8_t macaddr[IEEE80211_ADDR_LEN]; uint32_t reg; usb_error_t usb_err = USB_ERR_NORMAL_COMPLETION; int error; URTWN_LOCK(sc); if (sc->sc_flags & URTWN_RUNNING) { URTWN_UNLOCK(sc); return (0); } /* Init firmware commands ring. */ sc->fwcur = 0; /* Allocate Tx/Rx buffers. */ error = urtwn_alloc_rx_list(sc); if (error != 0) goto fail; error = urtwn_alloc_tx_list(sc); if (error != 0) goto fail; /* Power on adapter. */ error = urtwn_power_on(sc); if (error != 0) goto fail; /* Initialize DMA. */ error = urtwn_dma_init(sc); if (error != 0) goto fail; /* Set info size in Rx descriptors (in 64-bit words). */ urtwn_write_1(sc, R92C_RX_DRVINFO_SZ, 4); /* Init interrupts. 
*/ if (sc->chip & URTWN_CHIP_88E) { usb_err = urtwn_write_4(sc, R88E_HISR, 0xffffffff); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_4(sc, R88E_HIMR, R88E_HIMR_CPWM | R88E_HIMR_CPWM2 | R88E_HIMR_TBDER | R88E_HIMR_PSTIMEOUT); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW | R88E_HIMRE_TXFOVW | R88E_HIMRE_RXERR | R88E_HIMRE_TXERR); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION, urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) | R92C_USB_SPECIAL_OPTION_INT_BULK_SEL); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; } else { usb_err = urtwn_write_4(sc, R92C_HISR, 0xffffffff); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_4(sc, R92C_HIMR, 0xffffffff); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; } /* Set MAC address. */ IEEE80211_ADDR_COPY(macaddr, vap ? vap->iv_myaddr : ic->ic_macaddr); usb_err = urtwn_write_region_1(sc, R92C_MACID, macaddr, IEEE80211_ADDR_LEN); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; /* Set initial network type. */ urtwn_set_mode(sc, R92C_MSR_INFRA); /* Initialize Rx filter. */ urtwn_rxfilter_init(sc); /* Set response rate. */ reg = urtwn_read_4(sc, R92C_RRSR); reg = RW(reg, R92C_RRSR_RATE_BITMAP, R92C_RRSR_RATE_CCK_ONLY_1M); urtwn_write_4(sc, R92C_RRSR, reg); /* Set short/long retry limits. */ urtwn_write_2(sc, R92C_RL, SM(R92C_RL_SRL, 0x30) | SM(R92C_RL_LRL, 0x30)); /* Initialize EDCA parameters. */ urtwn_edca_init(sc); /* Setup rate fallback. */ if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_4(sc, R92C_DARFRC + 0, 0x00000000); urtwn_write_4(sc, R92C_DARFRC + 4, 0x10080404); urtwn_write_4(sc, R92C_RARFRC + 0, 0x04030201); urtwn_write_4(sc, R92C_RARFRC + 4, 0x08070605); } urtwn_write_1(sc, R92C_FWHW_TXQ_CTRL, urtwn_read_1(sc, R92C_FWHW_TXQ_CTRL) | R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW); /* Set ACK timeout. */ urtwn_write_1(sc, R92C_ACKTO, 0x40); /* Setup USB aggregation. */ reg = urtwn_read_4(sc, R92C_TDECTRL); reg = RW(reg, R92C_TDECTRL_BLK_DESC_NUM, 6); urtwn_write_4(sc, R92C_TDECTRL, reg); urtwn_write_1(sc, R92C_TRXDMA_CTRL, urtwn_read_1(sc, R92C_TRXDMA_CTRL) | R92C_TRXDMA_CTRL_RXDMA_AGG_EN); urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH, 48); if (sc->chip & URTWN_CHIP_88E) urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH + 1, 4); else { urtwn_write_1(sc, R92C_USB_DMA_AGG_TO, 4); urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION, urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) | R92C_USB_SPECIAL_OPTION_AGG_EN); urtwn_write_1(sc, R92C_USB_AGG_TH, 8); urtwn_write_1(sc, R92C_USB_AGG_TO, 6); } /* Initialize beacon parameters. */ urtwn_write_2(sc, R92C_BCN_CTRL, 0x1010); urtwn_write_2(sc, R92C_TBTT_PROHIBIT, 0x6404); urtwn_write_1(sc, R92C_DRVERLYINT, 0x05); urtwn_write_1(sc, R92C_BCNDMATIM, 0x02); urtwn_write_2(sc, R92C_BCNTCFG, 0x660f); if (!(sc->chip & URTWN_CHIP_88E)) { /* Setup AMPDU aggregation. */ urtwn_write_4(sc, R92C_AGGLEN_LMT, 0x99997631); /* MCS7~0 */ urtwn_write_1(sc, R92C_AGGR_BREAK_TIME, 0x16); urtwn_write_2(sc, R92C_MAX_AGGR_NUM, 0x0708); urtwn_write_1(sc, R92C_BCN_MAX_ERR, 0xff); } #ifndef URTWN_WITHOUT_UCODE /* Load 8051 microcode. */ error = urtwn_load_firmware(sc); if (error == 0) sc->sc_flags |= URTWN_FW_LOADED; #endif /* Initialize MAC/BB/RF blocks. */ error = urtwn_mac_init(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: error while initializing MAC block\n", __func__); goto fail; } urtwn_bb_init(sc); urtwn_rf_init(sc); /* Reinitialize Rx filter (D3845 is not committed yet). 
*/ urtwn_rxfilter_init(sc); if (sc->chip & URTWN_CHIP_88E) { urtwn_write_2(sc, R92C_CR, urtwn_read_2(sc, R92C_CR) | R92C_CR_MACTXEN | R92C_CR_MACRXEN); } /* Turn CCK and OFDM blocks on. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD); reg |= R92C_RFMOD_CCK_EN; usb_err = urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD); reg |= R92C_RFMOD_OFDM_EN; usb_err = urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; /* Clear per-station keys table. */ urtwn_cam_init(sc); /* Enable decryption / encryption. */ urtwn_write_2(sc, R92C_SECCFG, R92C_SECCFG_TXUCKEY_DEF | R92C_SECCFG_RXUCKEY_DEF | R92C_SECCFG_TXENC_ENA | R92C_SECCFG_RXDEC_ENA | R92C_SECCFG_TXBCKEY_DEF | R92C_SECCFG_RXBCKEY_DEF); /* * Install static keys (if any). * Must be called after urtwn_cam_init(). */ ieee80211_runtask(ic, &sc->cmdq_task); /* Enable hardware sequence numbering. */ urtwn_write_1(sc, R92C_HWSEQ_CTRL, R92C_TX_QUEUE_ALL); /* Enable per-packet TX report. */ if (sc->chip & URTWN_CHIP_88E) { urtwn_write_1(sc, R88E_TX_RPT_CTRL, urtwn_read_1(sc, R88E_TX_RPT_CTRL) | R88E_TX_RPT1_ENA); } /* Perform LO and IQ calibrations. */ urtwn_iq_calib(sc); /* Perform LC calibration. */ urtwn_lc_calib(sc); /* Fix USB interference issue. */ if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_1(sc, 0xfe40, 0xe0); urtwn_write_1(sc, 0xfe41, 0x8d); urtwn_write_1(sc, 0xfe42, 0x80); urtwn_pa_bias_init(sc); } /* Initialize GPIO setting. */ urtwn_write_1(sc, R92C_GPIO_MUXCFG, urtwn_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENBT); /* Fix for lower temperature. */ if (!(sc->chip & URTWN_CHIP_88E)) urtwn_write_1(sc, 0x15, 0xe9); usbd_transfer_start(sc->sc_xfer[URTWN_BULK_RX]); sc->sc_flags |= URTWN_RUNNING; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); fail: if (usb_err != USB_ERR_NORMAL_COMPLETION) error = EIO; URTWN_UNLOCK(sc); return (error); } static void urtwn_stop(struct urtwn_softc *sc) { URTWN_LOCK(sc); if (!(sc->sc_flags & URTWN_RUNNING)) { URTWN_UNLOCK(sc); return; } sc->sc_flags &= ~(URTWN_RUNNING | URTWN_FW_LOADED | URTWN_TEMP_MEASURED); sc->thcal_lctemp = 0; callout_stop(&sc->sc_watchdog_ch); urtwn_abort_xfers(sc); urtwn_drain_mbufq(sc); urtwn_power_off(sc); URTWN_UNLOCK(sc); } static void urtwn_abort_xfers(struct urtwn_softc *sc) { int i; URTWN_ASSERT_LOCKED(sc); /* abort any pending transfers */ for (i = 0; i < URTWN_N_TRANSFER; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static int urtwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct urtwn_softc *sc = ic->ic_softc; struct urtwn_data *bf; int error; /* prevent management frames from being sent if we're not ready */ URTWN_LOCK(sc); if (!(sc->sc_flags & URTWN_RUNNING)) { error = ENETDOWN; goto end; } bf = urtwn_getbuf(sc); if (bf == NULL) { error = ENOBUFS; goto end; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = urtwn_tx_data(sc, ni, m, bf); } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
*/ error = urtwn_tx_raw(sc, ni, m, bf, params); } if (error != 0) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); goto end; } sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); end: if (error != 0) m_freem(m); URTWN_UNLOCK(sc); return (error); } static void urtwn_ms_delay(struct urtwn_softc *sc) { usb_pause_mtx(&sc->sc_mtx, hz / 1000); } static device_method_t urtwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, urtwn_match), DEVMETHOD(device_attach, urtwn_attach), DEVMETHOD(device_detach, urtwn_detach), DEVMETHOD_END }; static driver_t urtwn_driver = { "urtwn", urtwn_methods, sizeof(struct urtwn_softc) }; static devclass_t urtwn_devclass; DRIVER_MODULE(urtwn, uhub, urtwn_driver, urtwn_devclass, NULL, NULL); MODULE_DEPEND(urtwn, usb, 1, 1, 1); MODULE_DEPEND(urtwn, wlan, 1, 1, 1); #ifndef URTWN_WITHOUT_UCODE MODULE_DEPEND(urtwn, firmware, 1, 1, 1); #endif MODULE_VERSION(urtwn, 1); USB_PNP_HOST_INFO(urtwn_devs); Index: projects/release-pkg/sys/powerpc/mpc85xx/mpc85xx.c =================================================================== --- projects/release-pkg/sys/powerpc/mpc85xx/mpc85xx.c (revision 295925) +++ projects/release-pkg/sys/powerpc/mpc85xx/mpc85xx.c (revision 295926) @@ -1,422 +1,423 @@ /*- * Copyright (C) 2008 Semihalf, Rafal Jaworowski * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * MPC85xx system specific routines */ uint32_t ccsr_read4(uintptr_t addr) { volatile uint32_t *ptr = (void *)addr; return (*ptr); } void ccsr_write4(uintptr_t addr, uint32_t val) { volatile uint32_t *ptr = (void *)addr; *ptr = val; powerpc_iomb(); } int law_getmax(void) { uint32_t ver; int law_max; ver = SVR_VER(mfspr(SPR_SVR)); switch (ver) { case SVR_MPC8555: case SVR_MPC8555E: law_max = 8; break; case SVR_MPC8533: case SVR_MPC8533E: case SVR_MPC8548: case SVR_MPC8548E: law_max = 10; break; case SVR_P5020: case SVR_P5020E: law_max = 32; break; default: law_max = 8; } return (law_max); } static inline void law_write(uint32_t n, uint64_t bar, uint32_t sr) { #if defined(QORIQ_DPAA) ccsr_write4(OCP85XX_LAWBARH(n), bar >> 32); ccsr_write4(OCP85XX_LAWBARL(n), bar); #else ccsr_write4(OCP85XX_LAWBAR(n), bar >> 12); #endif ccsr_write4(OCP85XX_LAWSR(n), sr); /* * The last write to LAWAR should be followed by a read * of LAWAR before any device try to use any of windows. * What more the read of LAWAR should be followed by isync * instruction. */ ccsr_read4(OCP85XX_LAWSR(n)); isync(); } static inline void law_read(uint32_t n, uint64_t *bar, uint32_t *sr) { #if defined(QORIQ_DPAA) *bar = (uint64_t)ccsr_read4(OCP85XX_LAWBARH(n)) << 32 | ccsr_read4(OCP85XX_LAWBARL(n)); #else *bar = (uint64_t)ccsr_read4(OCP85XX_LAWBAR(n)) << 12; #endif *sr = ccsr_read4(OCP85XX_LAWSR(n)); } static int law_find_free(void) { uint32_t i,sr; uint64_t bar; int law_max; law_max = law_getmax(); /* Find free LAW */ for (i = 0; i < law_max; i++) { law_read(i, &bar, &sr); if ((sr & 0x80000000) == 0) break; } return (i); } -#define _LAW_SR(trgt,size) (0x80000000 | (trgt << 20) | (ffsl(size) - 2)) +#define _LAW_SR(trgt,size) (0x80000000 | (trgt << 20) | \ + (flsl(size + (size - 1)) - 2)) int law_enable(int trgt, uint64_t bar, uint32_t size) { uint64_t bar_tmp; uint32_t sr, sr_tmp; int i, law_max; if (size == 0) return (0); law_max = law_getmax(); sr = _LAW_SR(trgt, size); /* Bail if already programmed. */ for (i = 0; i < law_max; i++) { law_read(i, &bar_tmp, &sr_tmp); if (sr == sr_tmp && bar == bar_tmp) return (0); } /* Find an unused access window. */ i = law_find_free(); if (i == law_max) return (ENOSPC); law_write(i, bar, sr); return (0); } int law_disable(int trgt, uint64_t bar, uint32_t size) { uint64_t bar_tmp; uint32_t sr, sr_tmp; int i, law_max; law_max = law_getmax(); sr = _LAW_SR(trgt, size); /* Find and disable requested LAW. 
*/ for (i = 0; i < law_max; i++) { law_read(i, &bar_tmp, &sr_tmp); if (sr == sr_tmp && bar == bar_tmp) { law_write(i, 0, 0); return (0); } } return (ENOENT); } int law_pci_target(struct resource *res, int *trgt_mem, int *trgt_io) { u_long start; uint32_t ver; int trgt, rv; ver = SVR_VER(mfspr(SPR_SVR)); start = rman_get_start(res) & 0xf000; rv = 0; trgt = -1; switch (start) { case 0x0000: case 0x8000: trgt = 0; break; case 0x1000: case 0x9000: trgt = 1; break; case 0x2000: case 0xa000: if (ver == SVR_MPC8548E || ver == SVR_MPC8548) trgt = 3; else trgt = 2; break; case 0x3000: case 0xb000: if (ver == SVR_MPC8548E || ver == SVR_MPC8548) rv = EINVAL; else trgt = 3; break; default: rv = ENXIO; } if (rv == 0) { *trgt_mem = trgt; *trgt_io = trgt; } return (rv); } static void l3cache_inval(void) { /* Flash invalidate the CPC and clear all the locks */ ccsr_write4(OCP85XX_CPC_CSR0, OCP85XX_CPC_CSR0_FI | OCP85XX_CPC_CSR0_LFC); while (ccsr_read4(OCP85XX_CPC_CSR0) & (OCP85XX_CPC_CSR0_FI | OCP85XX_CPC_CSR0_LFC)) ; } static void l3cache_enable(void) { ccsr_write4(OCP85XX_CPC_CSR0, OCP85XX_CPC_CSR0_CE | OCP85XX_CPC_CSR0_PE); /* Read back to sync write */ ccsr_read4(OCP85XX_CPC_CSR0); } void mpc85xx_enable_l3_cache(void) { uint32_t csr, size, ver; /* Enable L3 CoreNet Platform Cache (CPC) */ ver = SVR_VER(mfspr(SPR_SVR)); if (ver == SVR_P2041 || ver == SVR_P2041E || ver == SVR_P3041 || ver == SVR_P3041E || ver == SVR_P5020 || ver == SVR_P5020E) { csr = ccsr_read4(OCP85XX_CPC_CSR0); if ((csr & OCP85XX_CPC_CSR0_CE) == 0) { l3cache_inval(); l3cache_enable(); } csr = ccsr_read4(OCP85XX_CPC_CSR0); if ((boothowto & RB_VERBOSE) != 0 || (csr & OCP85XX_CPC_CSR0_CE) == 0) { size = OCP85XX_CPC_CFG0_SZ_K(ccsr_read4(OCP85XX_CPC_CFG0)); printf("L3 Corenet Platform Cache: %d KB %sabled\n", size, (csr & OCP85XX_CPC_CSR0_CE) == 0 ? "dis" : "en"); } } } static void mpc85xx_dataloss_erratum_spr976(void) { uint32_t svr = SVR_VER(mfspr(SPR_SVR)); /* Ignore whether it's the E variant */ svr &= ~0x8; if (svr != SVR_P3041 && svr != SVR_P4040 && svr != SVR_P4080 && svr != SVR_P5020) return; mb(); isync(); mtspr(976, (mfspr(976) & ~0x1f8) | 0x48); isync(); } static vm_offset_t mpc85xx_map_dcsr(void) { phandle_t node; u_long b, s; int err; /* * Try to access the dcsr node directly i.e. through /aliases/. */ if ((node = OF_finddevice("dcsr")) != -1) if (fdt_is_compatible_strict(node, "fsl,dcsr")) goto moveon; /* * Find the node the long way. */ if ((node = OF_finddevice("/")) == -1) return (ENXIO); if ((node = ofw_bus_find_compatible(node, "fsl,dcsr")) == 0) return (ENXIO); moveon: err = fdt_get_range(node, 0, &b, &s); if (err != 0) return (err); #ifdef QORIQ_DPAA law_enable(OCP85XX_TGTIF_DCSR, b, 0x400000); #endif return pmap_early_io_map(b, 0x400000); } void mpc85xx_fix_errata(vm_offset_t va_ccsr) { uint32_t svr = SVR_VER(mfspr(SPR_SVR)); vm_offset_t va_dcsr; /* Ignore whether it's the E variant */ svr &= ~0x8; if (svr != SVR_P3041 && svr != SVR_P4040 && svr != SVR_P4080 && svr != SVR_P5020) return; if (mfmsr() & PSL_EE) return; /* * dcsr region need to be mapped thus patch can refer to. * Align dcsr right after ccsbar. */ va_dcsr = mpc85xx_map_dcsr(); if (va_dcsr == 0) goto err; /* * As A004510 errata specify, special purpose register 976 * SPR976[56:60] = 6'b001001 must be set. e500mc core reference manual * does not document SPR976 register. */ mpc85xx_dataloss_erratum_spr976(); /* * Specific settings in the CCF and core platform cache (CPC) * are required to reconfigure the CoreNet coherency fabric. 
* The register settings that should be updated are described * in errata and relay on base address, offset and updated value. * Special conditions must be used to update these registers correctly. */ dataloss_erratum_access(va_dcsr + 0xb0e08, 0xe0201800); dataloss_erratum_access(va_dcsr + 0xb0e18, 0xe0201800); dataloss_erratum_access(va_dcsr + 0xb0e38, 0xe0400000); dataloss_erratum_access(va_dcsr + 0xb0008, 0x00900000); dataloss_erratum_access(va_dcsr + 0xb0e40, 0xe00a0000); switch (svr) { case SVR_P5020: dataloss_erratum_access(va_ccsr + 0x18600, 0xc0000000); break; case SVR_P4040: case SVR_P4080: dataloss_erratum_access(va_ccsr + 0x18600, 0xff000000); break; case SVR_P3041: dataloss_erratum_access(va_ccsr + 0x18600, 0xf0000000); } dataloss_erratum_access(va_ccsr + 0x10f00, 0x415e5000); dataloss_erratum_access(va_ccsr + 0x11f00, 0x415e5000); err: return; } Index: projects/release-pkg/sys/riscv/include/atomic.h =================================================================== --- projects/release-pkg/sys/riscv/include/atomic.h (revision 295925) +++ projects/release-pkg/sys/riscv/include/atomic.h (revision 295926) @@ -1,465 +1,461 @@ /*- * Copyright (c) 2015 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_ATOMIC_H_ #define _MACHINE_ATOMIC_H_ #define fence() __asm __volatile("fence" ::: "memory"); #define mb() fence() #define rmb() fence() #define wmb() fence() #define ATOMIC_ACQ_REL(NAME, WIDTH) \ static __inline void \ atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\ { \ atomic_##NAME##_##WIDTH(p, v); \ fence(); \ } \ \ static __inline void \ atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\ { \ fence(); \ atomic_##NAME##_##WIDTH(p, v); \ } static __inline void atomic_add_32(volatile uint32_t *p, uint32_t val) { __asm __volatile("amoadd.w zero, %1, %0" : "+A" (*p) : "r" (val) : "memory"); } static __inline void atomic_subtract_32(volatile uint32_t *p, uint32_t val) { __asm __volatile("amoadd.w zero, %1, %0" : "+A" (*p) : "r" (-val) : "memory"); } static __inline void atomic_set_32(volatile uint32_t *p, uint32_t val) { __asm __volatile("amoor.w zero, %1, %0" : "+A" (*p) : "r" (val) : "memory"); } static __inline void atomic_clear_32(volatile uint32_t *p, uint32_t val) { __asm __volatile("amoand.w zero, %1, %0" : "+A" (*p) : "r" (~val) : "memory"); } static __inline int atomic_cmpset_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval) { uint32_t tmp; int res; res = 0; __asm __volatile( "0:" "li %1, 1\n" /* Preset to fail */ "lr.w %0, %2\n" "bne %0, %z3, 1f\n" "sc.w %1, %z4, %2\n" "bnez %1, 0b\n" "1:" : "=&r" (tmp), "=&r" (res), "+A" (*p) : "rJ" (cmpval), "rJ" (newval) : "memory"); return (!res); } static __inline uint32_t atomic_fetchadd_32(volatile uint32_t *p, uint32_t val) { uint32_t ret; __asm __volatile("amoadd.w %0, %2, %1" : "=&r" (ret), "+A" (*p) : "r" (val) : "memory"); return (ret); } static __inline uint32_t atomic_readandclear_32(volatile uint32_t *p) { uint32_t ret; uint32_t val; val = 0; __asm __volatile("amoswap.w %0, %2, %1" : "=&r"(ret), "+A" (*p) : "r" (val) : "memory"); return (ret); } #define atomic_add_int atomic_add_32 #define atomic_clear_int atomic_clear_32 #define atomic_cmpset_int atomic_cmpset_32 #define atomic_fetchadd_int atomic_fetchadd_32 #define atomic_readandclear_int atomic_readandclear_32 #define atomic_set_int atomic_set_32 #define atomic_subtract_int atomic_subtract_32 ATOMIC_ACQ_REL(set, 32) ATOMIC_ACQ_REL(clear, 32) ATOMIC_ACQ_REL(add, 32) ATOMIC_ACQ_REL(subtract, 32) static __inline int atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval) { int res; res = atomic_cmpset_32(p, cmpval, newval); fence(); return (res); } static __inline int atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval) { fence(); return (atomic_cmpset_32(p, cmpval, newval)); } static __inline uint32_t atomic_load_acq_32(volatile uint32_t *p) { uint32_t ret; ret = *p; fence(); return (ret); } static __inline void atomic_store_rel_32(volatile uint32_t *p, uint32_t val) { fence(); *p = val; } #define atomic_add_acq_int atomic_add_acq_32 #define atomic_clear_acq_int atomic_clear_acq_32 #define atomic_cmpset_acq_int atomic_cmpset_acq_32 #define atomic_load_acq_int atomic_load_acq_32 #define atomic_set_acq_int atomic_set_acq_32 #define atomic_subtract_acq_int atomic_subtract_acq_32 #define atomic_add_rel_int atomic_add_rel_32 #define atomic_clear_rel_int atomic_add_rel_32 #define atomic_cmpset_rel_int atomic_cmpset_rel_32 #define atomic_set_rel_int atomic_set_rel_32 #define atomic_subtract_rel_int atomic_subtract_rel_32 #define atomic_store_rel_int atomic_store_rel_32 static __inline void atomic_add_64(volatile uint64_t *p, uint64_t val) { 
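	/*
	 * Single RISC-V AMO instruction: atomically load *p, add val and
	 * store the result back; the old value is written to register
	 * zero and discarded, matching the void-returning 32-bit variant
	 * above.
	 */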
__asm __volatile("amoadd.d zero, %1, %0" : "+A" (*p) : "r" (val) : "memory"); } static __inline void atomic_subtract_64(volatile uint64_t *p, uint64_t val) { __asm __volatile("amoadd.d zero, %1, %0" : "+A" (*p) : "r" (-val) : "memory"); } static __inline void atomic_set_64(volatile uint64_t *p, uint64_t val) { __asm __volatile("amoor.d zero, %1, %0" : "+A" (*p) : "r" (val) : "memory"); } static __inline void atomic_clear_64(volatile uint64_t *p, uint64_t val) { __asm __volatile("amoand.d zero, %1, %0" : "+A" (*p) : "r" (~val) : "memory"); } static __inline int atomic_cmpset_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval) { uint64_t tmp; int res; res = 0; __asm __volatile( "0:" "li %1, 1\n" /* Preset to fail */ "lr.d %0, %2\n" "bne %0, %z3, 1f\n" "sc.d %1, %z4, %2\n" "bnez %1, 0b\n" "1:" : "=&r" (tmp), "=&r" (res), "+A" (*p) : "rJ" (cmpval), "rJ" (newval) : "memory"); return (!res); } static __inline uint64_t atomic_fetchadd_64(volatile uint64_t *p, uint64_t val) { uint64_t ret; __asm __volatile("amoadd.d %0, %2, %1" : "=&r" (ret), "+A" (*p) : "r" (val) : "memory"); return (ret); } static __inline uint64_t atomic_readandclear_64(volatile uint64_t *p) { uint64_t ret; uint64_t val; val = 0; __asm __volatile("amoswap.d %0, %2, %1" : "=&r"(ret), "+A" (*p) : "r" (val) : "memory"); return (ret); } static __inline uint32_t atomic_swap_32(volatile uint32_t *p, uint32_t val) { uint32_t old; __asm __volatile("amoswap.w %0, %2, %1" : "=&r"(old), "+A" (*p) : "r" (val) : "memory"); return (old); } static __inline uint64_t atomic_swap_64(volatile uint64_t *p, uint64_t val) { uint64_t old; __asm __volatile("amoswap.d %0, %2, %1" : "=&r"(old), "+A" (*p) : "r" (val) : "memory"); return (old); } #define atomic_add_long atomic_add_64 #define atomic_clear_long atomic_clear_64 #define atomic_cmpset_long atomic_cmpset_64 #define atomic_fetchadd_long atomic_fetchadd_64 #define atomic_readandclear_long atomic_readandclear_64 #define atomic_set_long atomic_set_64 #define atomic_subtract_long atomic_subtract_64 #define atomic_add_ptr atomic_add_64 #define atomic_clear_ptr atomic_clear_64 #define atomic_cmpset_ptr atomic_cmpset_64 #define atomic_fetchadd_ptr atomic_fetchadd_64 #define atomic_readandclear_ptr atomic_readandclear_64 #define atomic_set_ptr atomic_set_64 #define atomic_subtract_ptr atomic_subtract_64 ATOMIC_ACQ_REL(set, 64) ATOMIC_ACQ_REL(clear, 64) ATOMIC_ACQ_REL(add, 64) ATOMIC_ACQ_REL(subtract, 64) static __inline int atomic_cmpset_acq_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval) { int res; res = atomic_cmpset_64(p, cmpval, newval); fence(); return (res); } static __inline int atomic_cmpset_rel_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval) { fence(); return (atomic_cmpset_64(p, cmpval, newval)); } static __inline uint64_t atomic_load_acq_64(volatile uint64_t *p) { uint64_t ret; ret = *p; fence(); return (ret); } static __inline void atomic_store_rel_64(volatile uint64_t *p, uint64_t val) { fence(); *p = val; } -#define atomic_add_acq_int atomic_add_acq_32 -#define atomic_clear_acq_int atomic_clear_acq_32 -#define atomic_cmpset_acq_int atomic_cmpset_acq_32 - #define atomic_add_acq_long atomic_add_acq_64 #define atomic_clear_acq_long atomic_add_acq_64 #define atomic_cmpset_acq_long atomic_cmpset_acq_64 #define atomic_load_acq_long atomic_load_acq_64 #define atomic_set_acq_long atomic_set_acq_64 #define atomic_subtract_acq_long atomic_subtract_acq_64 #define atomic_add_acq_ptr atomic_add_acq_64 #define atomic_clear_acq_ptr atomic_add_acq_64 #define 
atomic_cmpset_acq_ptr atomic_cmpset_acq_64 #define atomic_load_acq_ptr atomic_load_acq_64 #define atomic_set_acq_ptr atomic_set_acq_64 #define atomic_subtract_acq_ptr atomic_subtract_acq_64 static __inline void atomic_thread_fence_acq(void) { fence(); } static __inline void atomic_thread_fence_rel(void) { fence(); } static __inline void atomic_thread_fence_acq_rel(void) { fence(); } static __inline void atomic_thread_fence_seq_cst(void) { fence(); } #define atomic_add_rel_long atomic_add_rel_64 #define atomic_clear_rel_long atomic_clear_rel_64 #define atomic_add_rel_long atomic_add_rel_64 #define atomic_clear_rel_long atomic_clear_rel_64 #define atomic_cmpset_rel_long atomic_cmpset_rel_64 #define atomic_set_rel_long atomic_set_rel_64 #define atomic_subtract_rel_long atomic_subtract_rel_64 #define atomic_store_rel_long atomic_store_rel_64 #define atomic_add_rel_ptr atomic_add_rel_64 #define atomic_clear_rel_ptr atomic_clear_rel_64 #define atomic_cmpset_rel_ptr atomic_cmpset_rel_64 #define atomic_set_rel_ptr atomic_set_rel_64 #define atomic_subtract_rel_ptr atomic_subtract_rel_64 #define atomic_store_rel_ptr atomic_store_rel_64 #endif /* _MACHINE_ATOMIC_H_ */ Index: projects/release-pkg/sys/riscv/riscv/machdep.c =================================================================== --- projects/release-pkg/sys/riscv/riscv/machdep.c (revision 295925) +++ projects/release-pkg/sys/riscv/riscv/machdep.c (revision 295926) @@ -1,794 +1,799 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2015 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef FDT #include #include #endif struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2]; vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2]; int early_boot = 1; int cold = 1; long realmem = 0; long Maxmem = 0; #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t physmap[PHYSMAP_SIZE]; u_int physmap_idx; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ extern int *end; extern int *initstack_end; struct pcpu *pcpup; uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs); uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs) { return (0); } static void cpu_startup(void *dummy) { identify_cpu(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } void bzero(void *buf, size_t len) { uint8_t *p; p = buf; while(len-- > 0) *p++ = 0; } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sepc = frame->tf_sepc; regs->sstatus = frame->tf_sstatus; regs->ra = frame->tf_ra; regs->sp = frame->tf_sp; regs->gp = frame->tf_gp; regs->tp = frame->tf_tp; memcpy(regs->t, frame->tf_t, sizeof(regs->t)); memcpy(regs->s, frame->tf_s, sizeof(regs->s)); memcpy(regs->a, frame->tf_a, sizeof(regs->a)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sepc = regs->sepc; frame->tf_sstatus = regs->sstatus; frame->tf_ra = regs->ra; frame->tf_sp = regs->sp; frame->tf_gp = regs->gp; frame->tf_tp = regs->tp; memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t)); memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s)); memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a)); return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { /* TODO */ bzero(regs, sizeof(*regs)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { /* TODO */ return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { panic("fill_dbregs"); } int set_dbregs(struct thread *td, struct dbreg *regs) { panic("set_dbregs"); } int ptrace_set_pc(struct thread *td, u_long addr) { panic("ptrace_set_pc"); return (0); } int ptrace_single_step(struct thread *td) { /* TODO; */ return (0); } int ptrace_clear_single_step(struct thread *td) { /* TODO; */ return (0); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf; tf = td->td_frame; memset(tf, 0, sizeof(struct trapframe)); /* * We need to set a0 for init as it doesn't call * cpu_set_syscall_retval to copy the value. We also * need to set td_retval for the cases where we do. 
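* In the RISC-V calling convention a0 is both the first-argument and the return-value register, so the single assignment below covers both the direct path and the cpu_set_syscall_retval() path.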
*/ tf->tf_a[0] = td->td_retval[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_ra = imgp->entry_addr; tf->tf_sepc = imgp->entry_addr; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == sizeof((struct gpregs *)0)->gp_a); CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == sizeof((struct gpregs *)0)->gp_s); CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == sizeof((struct gpregs *)0)->gp_t); CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == sizeof((struct reg *)0)->a); CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == sizeof((struct reg *)0)->s); CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == sizeof((struct reg *)0)->t); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t)); memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s)); memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a)); if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_a[0] = 0; mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */ } mcp->mc_gpregs.gp_ra = tf->tf_ra; mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_gp = tf->tf_gp; mcp->mc_gpregs.gp_tp = tf->tf_tp; mcp->mc_gpregs.gp_sepc = tf->tf_sepc; mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf; tf = td->td_frame; memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t)); memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s)); memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a)); tf->tf_ra = mcp->mc_gpregs.gp_ra; tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_gp = mcp->mc_gpregs.gp_gp; tf->tf_tp = mcp->mc_gpregs.gp_tp; tf->tf_sepc = mcp->mc_gpregs.gp_sepc; tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus; return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { /* TODO */ } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { /* TODO */ } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "fence \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { panic("cpu_halt"); } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* TBD */ } /* Get current clock frequency for the given CPU ID. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { panic("cpu_est_clockrate"); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; td = curthread; if (td->td_md.md_spinlock_count == 0) { td->td_md.md_spinlock_count = 1; td->td_md.md_saved_sstatus_ie = intr_disable(); } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t sstatus_ie; td = curthread; critical_exit(); sstatus_ie = td->td_md.md_saved_sstatus_ie; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(sstatus_ie); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { uint64_t sstatus; ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. 
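* A valid user context must have SSTATUS_PS clear (the previous privilege level was user) and SSTATUS_PIE set (interrupts were enabled), which is exactly what the check below enforces.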
*/ sstatus = uc.uc_mcontext.mc_gpregs.gp_sstatus; if ((sstatus & SSTATUS_PS) != 0 || (sstatus & SSTATUS_PIE) == 0) return (EINVAL); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); set_fpcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { memcpy(pcb->pcb_t, tf->tf_t, sizeof(tf->tf_t)); memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s)); memcpy(pcb->pcb_a, tf->tf_a, sizeof(tf->tf_a)); pcb->pcb_ra = tf->tf_ra; pcb->pcb_sp = tf->tf_sp; pcb->pcb_gp = tf->tf_gp; pcb->pcb_tp = tf->tf_tp; pcb->pcb_sepc = tf->tf_sepc; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe *fp, frame; struct sysentvec *sysent; struct trapframe *tf; struct sigacts *psp; struct thread *td; struct proc *p; int onstack; int code; int sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); get_fpcontext(td, &frame.sf_uc.uc_mcontext); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. 
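* There is no point in trying to deliver the signal, since the handler frame itself could not be written to the user stack.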
*/ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_a[0] = sig; tf->tf_a[1] = (register_t)&fp->sf_si; tf->tf_a[2] = (register_t)&fp->sf_uc; tf->tf_sepc = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_ra = (register_t)sysent->sv_sigcode_base; else tf->tf_ra = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); - CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr, + CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } static int add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, u_int *physmap_idxp) { u_int i, insert_idx, _physmap_idx; _physmap_idx = *physmap_idxp; if (length == 0) return (1); /* * Find insertion point while checking for overlap. Start off by * assuming the new entry will be added to the end. */ insert_idx = _physmap_idx; for (i = 0; i <= _physmap_idx; i += 2) { if (base < physmap[i + 1]) { if (base + length <= physmap[i]) { insert_idx = i; break; } if (boothowto & RB_VERBOSE) printf( "Overlapping memory regions, ignoring second region\n"); return (1); } } /* See if we can prepend to the next entry. */ if (insert_idx <= _physmap_idx && base + length == physmap[insert_idx]) { physmap[insert_idx] = base; return (1); } /* See if we can append to the previous entry. */ if (insert_idx > 0 && base == physmap[insert_idx - 1]) { physmap[insert_idx - 1] += length; return (1); } _physmap_idx += 2; *physmap_idxp = _physmap_idx; if (_physmap_idx == PHYSMAP_SIZE) { printf( "Too many segments in the physical address map, giving up\n"); return (0); } /* * Move the last 'N' entries down to make room for the new * entry if needed. */ for (i = _physmap_idx; i > insert_idx; i -= 2) { physmap[i] = physmap[i - 2]; physmap[i + 1] = physmap[i - 1]; } /* Insert the new entry. */ physmap[insert_idx] = base; physmap[insert_idx + 1] = base + length; printf("physmap[%d] = 0x%016lx\n", insert_idx, base); printf("physmap[%d] = 0x%016lx\n", insert_idx + 1, base + length); return (1); } #ifdef FDT static void try_load_dtb(caddr_t kmdp) { vm_offset_t dtbp; dtbp = (vm_offset_t)&fdt_static_dtb; if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } #endif static void cache_setup(void) { /* TODO */ } /* * Fake up a boot descriptor table. * RISCVTODO: This needs to be done via loader (when it's available). 
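* The metadata built below is a flat array of (type, size, payload) records terminated by a zero entry, which is the layout preload_search_by_type() and the rest of the preload code expect.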
*/ vm_offset_t fake_preload_metadata(struct riscv_bootparams *rvbp __unused) { #ifdef DDB vm_offset_t zstart = 0, zend = 0; #endif vm_offset_t lastaddr; int i = 0; static uint32_t fake_preload[35]; fake_preload[i++] = MODINFO_NAME; fake_preload[i++] = strlen("kernel") + 1; strcpy((char*)&fake_preload[i++], "kernel"); i += 1; fake_preload[i++] = MODINFO_TYPE; fake_preload[i++] = strlen("elf64 kernel") + 1; strcpy((char*)&fake_preload[i++], "elf64 kernel"); i += 3; fake_preload[i++] = MODINFO_ADDR; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = (uint64_t)(KERNBASE + KERNENTRY); i += 1; fake_preload[i++] = MODINFO_SIZE; fake_preload[i++] = sizeof(uint64_t); printf("end is 0x%016lx\n", (uint64_t)&end); fake_preload[i++] = (uint64_t)&end - (uint64_t)(KERNBASE + KERNENTRY); i += 1; #ifdef DDB #if 0 /* RISCVTODO */ if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); zend = lastaddr; zstart = *(uint32_t *)(KERNVIRTADDR + 4); db_fetch_ksymtab(zstart, zend); } else #endif #endif lastaddr = (vm_offset_t)&end; fake_preload[i++] = 0; fake_preload[i] = 0; preload_metadata = (void *)fake_preload; return (lastaddr); } void initriscv(struct riscv_bootparams *rvbp) { + struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_offset_t lastaddr; + int mem_regions_sz; vm_size_t kernlen; + uint32_t memsize; caddr_t kmdp; + int i; /* Set the module data location */ lastaddr = fake_preload_metadata(rvbp); /* Find the kernel address */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = 0; kern_envp = NULL; #ifdef FDT try_load_dtb(kmdp); #endif /* Load the physical memory ranges */ physmap_idx = 0; - /* - * RISCVTODO: figure out whether platform provides ranges, - * or grab from FDT. - */ - add_physmap_entry(0, 0x8000000, physmap, &physmap_idx); + /* Grab physical memory regions information from device tree. */ + if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) + panic("Cannot get physical memory regions"); + for (i = 0; i < mem_regions_sz; i++) + add_physmap_entry(mem_regions[i].mr_start, + mem_regions[i].mr_size, physmap, &physmap_idx); /* Set the pcpu data, this is needed by pmap_bootstrap */ pcpup = &__pcpu[0]; pcpu_init(pcpup, 0, sizeof(struct pcpu)); /* Set the pcpu pointer */ __asm __volatile("mv gp, %0" :: "r"(pcpup)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); cache_setup(); /* Bootstrap enough of pmap to enter the kernel proper */ kernlen = (lastaddr - KERNBASE); pmap_bootstrap(rvbp->kern_l1pt, KERNENTRY, kernlen); cninit(); init_proc0(rvbp->kern_stack); /* set page table base register for thread0 */ thread0.td_pcb->pcb_l1addr = (rvbp->kern_l1pt - KERNBASE); msgbufinit(msgbufp, msgbufsize); mutex_init(); init_param2(physmem); kdb_init(); riscv_init_interrupts(); early_boot = 0; } Index: projects/release-pkg/sys/riscv/riscv/stack_machdep.c =================================================================== --- projects/release-pkg/sys/riscv/riscv/stack_machdep.c (nonexistent) +++ projects/release-pkg/sys/riscv/riscv/stack_machdep.c (revision 295926) @@ -0,0 +1,63 @@ +/*- + * Copyright (c) 2016 Ruslan Bukin + * All rights reserved. 
+ * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +#include +#include + +void +stack_save_td(struct stack *st, struct thread *td) +{ + +} + +int +stack_save_td_running(struct stack *st, struct thread *td) +{ + + return (EOPNOTSUPP); +} + +void +stack_save(struct stack *st) +{ + +} Property changes on: projects/release-pkg/sys/riscv/riscv/stack_machdep.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/release-pkg/sys/riscv/riscv/timer.c =================================================================== --- projects/release-pkg/sys/riscv/riscv/timer.c (revision 295925) +++ projects/release-pkg/sys/riscv/riscv/timer.c (revision 295926) @@ -1,298 +1,299 @@ /*- * Copyright (c) 2015 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * RISC-V Timer */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DEFAULT_FREQ 1000000 struct riscv_tmr_softc { struct resource *res[1]; void *ihl[1]; uint32_t clkfreq; struct eventtimer et; }; static struct riscv_tmr_softc *riscv_tmr_sc = NULL; static struct resource_spec timer_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static timecounter_get_t riscv_tmr_get_timecount; static struct timecounter riscv_tmr_timecount = { .tc_name = "RISC-V Timecounter", .tc_get_timecount = riscv_tmr_get_timecount, .tc_poll_pps = NULL, .tc_counter_mask = ~0u, .tc_frequency = 0, .tc_quality = 1000, }; static long get_counts(void) { return (csr_read(stime)); } static unsigned riscv_tmr_get_timecount(struct timecounter *tc) { return (get_counts()); } static int riscv_tmr_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct riscv_tmr_softc *sc; int counts; sc = (struct riscv_tmr_softc *)et->et_priv; if (first != 0) { counts = ((uint32_t)et->et_frequency * first) >> 32; machine_command(ECALL_MTIMECMP, counts); return (0); } return (EINVAL); } static int riscv_tmr_stop(struct eventtimer *et) { struct riscv_tmr_softc *sc; sc = (struct riscv_tmr_softc *)et->et_priv; /* TODO */ return (0); } static int riscv_tmr_intr(void *arg) { struct riscv_tmr_softc *sc; sc = (struct riscv_tmr_softc *)arg; /* * Clear interrupt pending bit. - * Note sip register is unimplemented in Spike simulator, - * so use machine command to clear in mip. + * Note: SIP_STIP bit is not implemented in sip register + * in Spike simulator, so use machine command to clear + * interrupt pending bit in mip. 
*/ machine_command(ECALL_CLEAR_PENDING, 0); if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static int riscv_tmr_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "riscv,timer")) { device_set_desc(dev, "RISC-V Timer"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int riscv_tmr_attach(device_t dev) { struct riscv_tmr_softc *sc; phandle_t node; pcell_t clock; int error; sc = device_get_softc(dev); if (riscv_tmr_sc) return (ENXIO); /* Get the base clock frequency */ node = ofw_bus_get_node(dev); if (node > 0) { error = OF_getprop(node, "clock-frequency", &clock, sizeof(clock)); if (error > 0) { sc->clkfreq = fdt32_to_cpu(clock); } } if (sc->clkfreq == 0) sc->clkfreq = DEFAULT_FREQ; if (sc->clkfreq == 0) { device_printf(dev, "No clock frequency specified\n"); return (ENXIO); } if (bus_alloc_resources(dev, timer_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } riscv_tmr_sc = sc; /* Setup IRQs handler */ error = bus_setup_intr(dev, sc->res[0], INTR_TYPE_CLK, riscv_tmr_intr, NULL, sc, &sc->ihl[0]); if (error) { device_printf(dev, "Unable to alloc int resource.\n"); return (ENXIO); } riscv_tmr_timecount.tc_frequency = sc->clkfreq; tc_init(&riscv_tmr_timecount); sc->et.et_name = "RISC-V Eventtimer"; sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 1000; sc->et.et_frequency = sc->clkfreq; sc->et.et_min_period = (0x00000002LLU << 32) / sc->et.et_frequency; sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = riscv_tmr_start; sc->et.et_stop = riscv_tmr_stop; sc->et.et_priv = sc; et_register(&sc->et); return (0); } static device_method_t riscv_tmr_fdt_methods[] = { DEVMETHOD(device_probe, riscv_tmr_fdt_probe), DEVMETHOD(device_attach, riscv_tmr_attach), { 0, 0 } }; static driver_t riscv_tmr_fdt_driver = { "timer", riscv_tmr_fdt_methods, sizeof(struct riscv_tmr_softc), }; static devclass_t riscv_tmr_fdt_devclass; EARLY_DRIVER_MODULE(timer, simplebus, riscv_tmr_fdt_driver, riscv_tmr_fdt_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(timer, ofwbus, riscv_tmr_fdt_driver, riscv_tmr_fdt_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); void DELAY(int usec) { int32_t counts, counts_per_usec; uint32_t first, last; /* * Check the timers are setup, if not just * use a for loop for the meantime */ if (riscv_tmr_sc == NULL) { for (; usec > 0; usec--) for (counts = 200; counts > 0; counts--) /* * Prevent the compiler from optimizing * out the loop */ cpufunc_nullop(); return; } /* Get the number of times to count */ counts_per_usec = ((riscv_tmr_timecount.tc_frequency / 1000000) + 1); /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (usec >= (0x80000000U / counts_per_usec)) counts = (0x80000000U / counts_per_usec) - 1; else counts = usec * counts_per_usec; first = get_counts(); while (counts > 0) { last = get_counts(); counts -= (int32_t)(last - first); first = last; } } Index: projects/release-pkg/sys/riscv/riscv/trap.c =================================================================== --- projects/release-pkg/sys/riscv/riscv/trap.c (revision 295925) +++ projects/release-pkg/sys/riscv/riscv/trap.c (revision 295926) @@ -1,310 +1,333 @@ /*- * Copyright (c) 2015 Ruslan Bukin * All rights reserved. 
* * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern register_t fsu_intr_fault; /* Called from exception.S */ void do_trap_supervisor(struct trapframe *); void do_trap_user(struct trapframe *); static __inline void call_trapsignal(struct thread *td, int sig, int code, void *addr) { ksiginfo_t ksi; ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = code; ksi.ksi_addr = addr; trapsignal(td, &ksi); } int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct proc *p; register_t *ap; int nap; nap = 8; p = td->td_proc; ap = &td->td_frame->tf_a[0]; sa->code = td->td_frame->tf_t[0]; if (sa->code == SYS_syscall || sa->code == SYS___syscall) { sa->code = *ap++; nap--; } if (p->p_sysent->sv_mask) sa->code &= p->p_sysent->sv_mask; if (sa->code >= p->p_sysent->sv_size) sa->callp = &p->p_sysent->sv_table[0]; else sa->callp = &p->p_sysent->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; memcpy(sa->args, ap, nap * sizeof(register_t)); if (sa->narg > nap) panic("TODO: Could we have more then 8 args?"); td->td_retval[0] = 0; td->td_retval[1] = 0; return (0); } #include "../../kern/subr_syscall.c" static void dump_regs(struct trapframe *frame) { int n; int i; n = (sizeof(frame->tf_t) / sizeof(frame->tf_t[0])); for (i = 0; i < n; i++) printf("t[%d] == 0x%016lx\n", i, frame->tf_t[i]); n = (sizeof(frame->tf_s) / sizeof(frame->tf_s[0])); for (i = 0; i < n; i++) printf("s[%d] == 0x%016lx\n", i, frame->tf_s[i]); n = (sizeof(frame->tf_a) / sizeof(frame->tf_a[0])); for (i = 0; i < n; i++) printf("a[%d] == 0x%016lx\n", i, frame->tf_a[i]); printf("sepc == 0x%016lx\n", frame->tf_sepc); printf("sstatus == 0x%016lx\n", frame->tf_sstatus); } static void 
svc_handler(struct trapframe *frame) { struct syscall_args sa; struct thread *td; int error; td = curthread; td->td_frame = frame; error = syscallenter(td, &sa); syscallret(td, error, &sa); } static void data_abort(struct trapframe *frame, int lower) { struct vm_map *map; uint64_t sbadaddr; struct thread *td; struct pcb *pcb; vm_prot_t ftype; vm_offset_t va; struct proc *p; int ucode; int error; int sig; td = curthread; pcb = td->td_pcb; /* * Special case for fuswintr and suswintr. These can't sleep so * handle them early on in the trap handler. */ if (__predict_false(pcb->pcb_onfault == (vm_offset_t)&fsu_intr_fault)) { frame->tf_sepc = pcb->pcb_onfault; return; } sbadaddr = frame->tf_sbadaddr; p = td->td_proc; if (lower) map = &td->td_proc->p_vmspace->vm_map; else { /* The top bit tells us which range to use */ if ((sbadaddr >> 63) == 1) map = kernel_map; else map = &td->td_proc->p_vmspace->vm_map; } va = trunc_page(sbadaddr); if (frame->tf_scause == EXCP_STORE_ACCESS_FAULT) { ftype = (VM_PROT_READ | VM_PROT_WRITE); } else { ftype = (VM_PROT_READ); } if (map != kernel_map) { /* * Keep swapout from messing with us during this * critical time. */ PROC_LOCK(p); ++p->p_lock; PROC_UNLOCK(p); /* Fault in the user page: */ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); PROC_LOCK(p); --p->p_lock; PROC_UNLOCK(p); } else { /* * Don't have to worry about process locking or stacks in the * kernel. */ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); } if (error != KERN_SUCCESS) { if (lower) { sig = SIGSEGV; if (error == KERN_PROTECTION_FAILURE) ucode = SEGV_ACCERR; else ucode = SEGV_MAPERR; call_trapsignal(td, sig, ucode, (void *)sbadaddr); } else { if (td->td_intr_nesting_level == 0 && pcb->pcb_onfault != 0) { frame->tf_a[0] = error; frame->tf_sepc = pcb->pcb_onfault; return; } dump_regs(frame); panic("vm_fault failed: %lx, va 0x%016lx", frame->tf_sepc, sbadaddr); } } if (lower) userret(td, frame); } void do_trap_supervisor(struct trapframe *frame) { uint64_t exception; exception = (frame->tf_scause & EXCP_MASK); if (frame->tf_scause & EXCP_INTR) { /* Interrupt */ riscv_cpu_intr(frame); return; } CTR3(KTR_TRAP, "do_trap_supervisor: curthread: %p, sepc: %lx, frame: %p", curthread, frame->tf_sepc, frame); switch(exception) { case EXCP_LOAD_ACCESS_FAULT: case EXCP_STORE_ACCESS_FAULT: case EXCP_INSTR_ACCESS_FAULT: data_abort(frame, 0); break; + case EXCP_INSTR_BREAKPOINT: +#ifdef KDB + kdb_trap(exception, 0, frame); +#else + dump_regs(frame); + panic("No debugger in kernel.\n"); +#endif + case EXCP_INSTR_ILLEGAL: + dump_regs(frame); + panic("Illegal instruction at %x\n", frame->tf_sepc); + break; default: dump_regs(frame); panic("Unknown kernel exception %x badaddr %lx\n", exception, frame->tf_sbadaddr); } } void do_trap_user(struct trapframe *frame) { uint64_t exception; + struct thread *td; + td = curthread; + td->td_frame = frame; + exception = (frame->tf_scause & EXCP_MASK); if (frame->tf_scause & EXCP_INTR) { /* Interrupt */ riscv_cpu_intr(frame); return; } CTR3(KTR_TRAP, "do_trap_user: curthread: %p, sepc: %lx, frame: %p", curthread, frame->tf_sepc, frame); switch(exception) { case EXCP_LOAD_ACCESS_FAULT: case EXCP_STORE_ACCESS_FAULT: case EXCP_INSTR_ACCESS_FAULT: data_abort(frame, 1); break; case EXCP_UMODE_ENV_CALL: frame->tf_sepc += 4; /* Next instruction */ svc_handler(frame); + break; + case EXCP_INSTR_ILLEGAL: + call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)frame->tf_sepc); + userret(td, frame); + break; + case EXCP_INSTR_BREAKPOINT: + call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void 
*)frame->tf_sepc); + userret(td, frame); break; default: dump_regs(frame); panic("Unknown userland exception %x badaddr %lx\n", exception, frame->tf_sbadaddr); } } Index: projects/release-pkg/sys =================================================================== --- projects/release-pkg/sys (revision 295925) +++ projects/release-pkg/sys (revision 295926) Property changes on: projects/release-pkg/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r295886-295925 Index: projects/release-pkg/usr.sbin/iostat/iostat.c =================================================================== --- projects/release-pkg/usr.sbin/iostat/iostat.c (revision 295925) +++ projects/release-pkg/usr.sbin/iostat/iostat.c (revision 295926) @@ -1,1024 +1,1024 @@ /* * Copyright (c) 1997, 1998, 2000, 2001 Kenneth D. Merry * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Parts of this program are derived from the original FreeBSD iostat * program: */ /*- * Copyright (c) 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Ideas for the new iostat statistics output modes taken from the NetBSD * version of iostat: */ /* * Copyright (c) 1996 John M. Vinopal * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project * by John M. Vinopal. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct nlist namelist[] = { #define X_TTY_NIN 0 { .n_name = "_tty_nin", .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, #define X_TTY_NOUT 1 { .n_name = "_tty_nout", .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, #define X_BOOTTIME 2 { .n_name = "_boottime", .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, #define X_END 2 { .n_name = NULL, .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, }; #define IOSTAT_DEFAULT_ROWS 20 /* Traditional default `wrows' */ static struct statinfo cur, last; static int num_devices; static struct device_selection *dev_select; static int maxshowdevs; static volatile sig_atomic_t headercount; static volatile sig_atomic_t wresized; /* Tty resized, when non-zero. */ static volatile sig_atomic_t alarm_rang; static volatile sig_atomic_t return_requested; static unsigned short wrows; /* Current number of tty rows. 
*/ static int dflag = 0, Iflag = 0, Cflag = 0, Tflag = 0, oflag = 0, Kflag = 0; static int xflag = 0, zflag = 0; /* local function declarations */ static void usage(void); static void needhdr(int signo); static void needresize(int signo); static void needreturn(int signo); static void alarm_clock(int signo); static void doresize(void); static void phdr(void); static void devstats(int perf_select, long double etime, int havelast); static void cpustats(void); static int readvar(kvm_t *kd, const char *name, int nlid, void *ptr, size_t len); static void usage(void) { /* * We also support the following 'traditional' syntax: * iostat [drives] [wait [count]] * This isn't mentioned in the man page, or the usage statement, * but it is supported. */ fprintf(stderr, "usage: iostat [-CdhIKoTxz?] [-c count] [-M core]" " [-n devs] [-N system]\n" "\t [-t type,if,pass] [-w wait] [drives]\n"); } int main(int argc, char **argv) { int c, i; int tflag = 0, hflag = 0, cflag = 0, wflag = 0, nflag = 0; int count = 0, waittime = 0; char *memf = NULL, *nlistf = NULL; struct devstat_match *matches; struct itimerval alarmspec; int num_matches = 0; char errbuf[_POSIX2_LINE_MAX]; kvm_t *kd = NULL; long generation; int num_devices_specified; int num_selected, num_selections; long select_generation; char **specified_devices; devstat_select_mode select_mode; float f; int havelast = 0; matches = NULL; maxshowdevs = 3; while ((c = getopt(argc, argv, "c:CdhIKM:n:N:ot:Tw:xz?")) != -1) { switch(c) { case 'c': cflag++; count = atoi(optarg); if (count < 1) errx(1, "count %d is < 1", count); break; case 'C': Cflag++; break; case 'd': dflag++; break; case 'h': hflag++; break; case 'I': Iflag++; break; case 'K': Kflag++; break; case 'M': memf = optarg; break; case 'n': nflag++; maxshowdevs = atoi(optarg); if (maxshowdevs < 0) errx(1, "number of devices %d is < 0", maxshowdevs); break; case 'N': nlistf = optarg; break; case 'o': oflag++; break; case 't': tflag++; if (devstat_buildmatch(optarg, &matches, &num_matches) != 0) errx(1, "%s", devstat_errbuf); break; case 'T': Tflag++; break; case 'w': wflag++; f = atof(optarg); waittime = f * 1000; if (waittime < 1) errx(1, "wait time is < 1ms"); break; case 'x': xflag++; break; case 'z': zflag++; break; default: usage(); exit(1); break; } } argc -= optind; argv += optind; if (nlistf != NULL || memf != NULL) { kd = kvm_openfiles(nlistf, memf, NULL, O_RDONLY, errbuf); if (kd == NULL) errx(1, "kvm_openfiles: %s", errbuf); if (kvm_nlist(kd, namelist) == -1) errx(1, "kvm_nlist: %s", kvm_geterr(kd)); } /* * Make sure that the userland devstat version matches the kernel * devstat version. If not, exit and print a message informing * the user of his mistake. */ if (devstat_checkversion(kd) < 0) errx(1, "%s", devstat_errbuf); /* * Make sure Tflag and/or Cflag are set if dflag == 0. If dflag is * greater than 0, they may be 0 or non-zero. */ if (dflag == 0 && xflag == 0) { Cflag = 1; Tflag = 1; } /* find out how many devices we have */ if ((num_devices = devstat_getnumdevs(kd)) < 0) err(1, "can't get number of devices"); /* * Figure out how many devices we should display. 
*/ if (nflag == 0) { if (xflag > 0) maxshowdevs = num_devices; else if (oflag > 0) { if ((dflag > 0) && (Cflag == 0) && (Tflag == 0)) maxshowdevs = 5; else if ((dflag > 0) && (Tflag > 0) && (Cflag == 0)) maxshowdevs = 5; else maxshowdevs = 4; } else { if ((dflag > 0) && (Cflag == 0)) maxshowdevs = 4; else maxshowdevs = 3; } } cur.dinfo = (struct devinfo *)calloc(1, sizeof(struct devinfo)); if (cur.dinfo == NULL) err(1, "calloc failed"); last.dinfo = (struct devinfo *)calloc(1, sizeof(struct devinfo)); if (last.dinfo == NULL) err(1, "calloc failed"); /* * Grab all the devices. We don't look to see if the list has * changed here, since it almost certainly has. We only look for * errors. */ if (devstat_getdevs(kd, &cur) == -1) errx(1, "%s", devstat_errbuf); num_devices = cur.dinfo->numdevs; generation = cur.dinfo->generation; /* * If the user specified any devices on the command line, see if * they are in the list of devices we have now. */ specified_devices = (char **)malloc(sizeof(char *)); if (specified_devices == NULL) err(1, "malloc failed"); for (num_devices_specified = 0; *argv; ++argv) { if (isdigit(**argv)) break; num_devices_specified++; specified_devices = (char **)realloc(specified_devices, sizeof(char *) * num_devices_specified); if (specified_devices == NULL) err(1, "realloc failed"); specified_devices[num_devices_specified - 1] = *argv; } if (nflag == 0 && maxshowdevs < num_devices_specified) maxshowdevs = num_devices_specified; dev_select = NULL; if ((num_devices_specified == 0) && (num_matches == 0)) select_mode = DS_SELECT_ADD; else select_mode = DS_SELECT_ONLY; /* * At this point, selectdevs will almost surely indicate that the * device list has changed, so we don't look for return values of 0 * or 1. If we get back -1, though, there is an error. */ if (devstat_selectdevs(&dev_select, &num_selected, &num_selections, &select_generation, generation, cur.dinfo->devices, num_devices, matches, num_matches, specified_devices, num_devices_specified, select_mode, maxshowdevs, hflag) == -1) errx(1, "%s", devstat_errbuf); /* * Look for the traditional wait time and count arguments. */ if (*argv) { f = atof(*argv); waittime = f * 1000; /* Let the user know he goofed, but keep going anyway */ if (wflag != 0) warnx("discarding previous wait interval, using" " %g instead", waittime / 1000.0); wflag++; if (*++argv) { count = atoi(*argv); if (cflag != 0) warnx("discarding previous count, using %d" " instead", count); cflag++; } else count = -1; } /* * If the user specified a count, but not an interval, we default * to an interval of 1 second. */ if ((wflag == 0) && (cflag > 0)) waittime = 1 * 1000; /* * If the user specified a wait time, but not a count, we want to * go on ad infinitum. This can be redundant if the user uses the * traditional method of specifying the wait, since in that case we * already set count = -1 above. Oh well. */ if ((wflag > 0) && (cflag == 0)) count = -1; bzero(cur.cp_time, sizeof(cur.cp_time)); cur.tk_nout = 0; cur.tk_nin = 0; /* * Set the snap time to the system boot time (ie: zero), so the * stats are calculated since system boot. */ cur.snap_time = 0; /* * If the user stops the program (control-Z) and then resumes it, * print out the header again. */ (void)signal(SIGCONT, needhdr); /* * If our standard output is a tty, then install a SIGWINCH handler * and set wresized so that our first iteration through the main * iostat loop will peek at the terminal's current rows to find out * how many lines can fit in a screenful of output. 
*/ if (isatty(fileno(stdout)) != 0) { wresized = 1; (void)signal(SIGWINCH, needresize); } else { wresized = 0; wrows = IOSTAT_DEFAULT_ROWS; } /* * Register a SIGINT handler so that we can print out final statistics * when we get that signal */ (void)signal(SIGINT, needreturn); /* * Register a SIGALRM handler to implement sleeps if the user uses the * -c or -w options */ (void)signal(SIGALRM, alarm_clock); alarmspec.it_interval.tv_sec = waittime / 1000; alarmspec.it_interval.tv_usec = 1000 * (waittime % 1000); alarmspec.it_value.tv_sec = waittime / 1000; alarmspec.it_value.tv_usec = 1000 * (waittime % 1000); setitimer(ITIMER_REAL, &alarmspec, NULL); for (headercount = 1;;) { struct devinfo *tmp_dinfo; long tmp; long double etime; sigset_t sigmask, oldsigmask; if (Tflag > 0) { if ((readvar(kd, "kern.tty_nin", X_TTY_NIN, &cur.tk_nin, sizeof(cur.tk_nin)) != 0) || (readvar(kd, "kern.tty_nout", X_TTY_NOUT, &cur.tk_nout, sizeof(cur.tk_nout))!= 0)) { Tflag = 0; warnx("disabling TTY statistics"); } } if (Cflag > 0) { if (kd == NULL) { if (readvar(kd, "kern.cp_time", 0, &cur.cp_time, sizeof(cur.cp_time)) != 0) Cflag = 0; } else { if (kvm_getcptime(kd, cur.cp_time) < 0) { warnx("kvm_getcptime: %s", kvm_geterr(kd)); Cflag = 0; } } if (Cflag == 0) warnx("disabling CPU time statistics"); } if (!--headercount) { phdr(); if (wresized != 0) doresize(); headercount = wrows; } tmp_dinfo = last.dinfo; last.dinfo = cur.dinfo; cur.dinfo = tmp_dinfo; last.snap_time = cur.snap_time; /* * Here what we want to do is refresh our device stats. * devstat_getdevs() returns 1 when the device list has changed. * If the device list has changed, we want to go through * the selection process again, in case a device that we * were previously displaying has gone away. */ switch (devstat_getdevs(kd, &cur)) { case -1: errx(1, "%s", devstat_errbuf); break; case 1: { int retval; num_devices = cur.dinfo->numdevs; generation = cur.dinfo->generation; retval = devstat_selectdevs(&dev_select, &num_selected, &num_selections, &select_generation, generation, cur.dinfo->devices, num_devices, matches, num_matches, specified_devices, num_devices_specified, select_mode, maxshowdevs, hflag); switch(retval) { case -1: errx(1, "%s", devstat_errbuf); break; case 1: phdr(); if (wresized != 0) doresize(); headercount = wrows; break; default: break; } break; } default: break; } /* * We only want to re-select devices if we're in 'top' * mode. This is the only mode where the devices selected * could actually change. 
*/ if (hflag > 0) { int retval; retval = devstat_selectdevs(&dev_select, &num_selected, &num_selections, &select_generation, generation, cur.dinfo->devices, num_devices, matches, num_matches, specified_devices, num_devices_specified, select_mode, maxshowdevs, hflag); switch(retval) { case -1: errx(1,"%s", devstat_errbuf); break; case 1: phdr(); if (wresized != 0) doresize(); headercount = wrows; break; default: break; } } if (Tflag > 0) { tmp = cur.tk_nin; cur.tk_nin -= last.tk_nin; last.tk_nin = tmp; tmp = cur.tk_nout; cur.tk_nout -= last.tk_nout; last.tk_nout = tmp; } etime = cur.snap_time - last.snap_time; if (etime == 0.0) etime = 1.0; for (i = 0; i < CPUSTATES; i++) { tmp = cur.cp_time[i]; cur.cp_time[i] -= last.cp_time[i]; last.cp_time[i] = tmp; } if (xflag == 0 && Tflag > 0) printf("%4.0Lf %5.0Lf", cur.tk_nin / etime, cur.tk_nout / etime); devstats(hflag, etime, havelast); if (xflag == 0) { if (Cflag > 0) cpustats(); printf("\n"); } fflush(stdout); if ((count >= 0 && --count <= 0) || return_requested) break; /* * Use sigsuspend to safely sleep until either signal is * received */ alarm_rang = 0; sigemptyset(&sigmask); sigaddset(&sigmask, SIGINT); sigaddset(&sigmask, SIGALRM); sigprocmask(SIG_BLOCK, &sigmask, &oldsigmask); while (! (alarm_rang || return_requested) ) { sigsuspend(&oldsigmask); } sigprocmask(SIG_UNBLOCK, &sigmask, NULL); havelast = 1; } exit(0); } /* * Force a header to be prepended to the next output. */ void needhdr(int signo __unused) { headercount = 1; } /* * When the terminal is resized, force an update of the maximum number of rows * printed between each header repetition. Then force a new header to be * prepended to the next output. */ void needresize(int signo __unused) { wresized = 1; headercount = 1; } /* * Record the alarm so the main loop can break its sleep */ void alarm_clock(int signo __unused) { alarm_rang = 1; } /* * Request that the main loop exit soon */ void needreturn(int signo __unused) { return_requested = 1; } /* * Update the global `wrows' count of terminal rows. */ void doresize(void) { int status; struct winsize w; for (;;) { status = ioctl(fileno(stdout), TIOCGWINSZ, &w); if (status == -1 && errno == EINTR) continue; else if (status == -1) err(1, "ioctl"); if (w.ws_row > 3) wrows = w.ws_row - 3; else wrows = IOSTAT_DEFAULT_ROWS; break; } /* * Inhibit doresize() calls until we are rescheduled by SIGWINCH. */ wresized = 0; } static void phdr(void) { int i, printed; char devbuf[256]; /* * If xflag is set, we need a per-loop header, not a page header, so * just return. We'll print the header in devstats(). 
*/ if (xflag > 0) return; if (Tflag > 0) (void)printf(" tty"); for (i = 0, printed=0;(i < num_devices) && (printed < maxshowdevs);i++){ int di; if ((dev_select[i].selected != 0) && (dev_select[i].selected <= maxshowdevs)) { di = dev_select[i].position; snprintf(devbuf, sizeof(devbuf), "%s%d", cur.dinfo->devices[di].device_name, cur.dinfo->devices[di].unit_number); if (oflag > 0) (void)printf("%13.6s ", devbuf); else printf("%16.6s ", devbuf); printed++; } } if (Cflag > 0) (void)printf(" cpu\n"); else (void)printf("\n"); if (Tflag > 0) (void)printf(" tin tout"); for (i=0, printed = 0;(i < num_devices) && (printed < maxshowdevs);i++){ if ((dev_select[i].selected != 0) && (dev_select[i].selected <= maxshowdevs)) { if (oflag > 0) { if (Iflag == 0) (void)printf(" sps tps msps "); else (void)printf(" blk xfr msps "); } else { if (Iflag == 0) printf(" KB/t tps MB/s "); else printf(" KB/t xfrs MB "); } printed++; } } if (Cflag > 0) (void)printf(" us ni sy in id\n"); else printf("\n"); } static void devstats(int perf_select, long double etime, int havelast) { int dn; long double transfers_per_second, transfers_per_second_read; long double transfers_per_second_write; long double kb_per_transfer, mb_per_second, mb_per_second_read; long double mb_per_second_write; u_int64_t total_bytes, total_transfers, total_blocks; u_int64_t total_bytes_read, total_transfers_read; u_int64_t total_bytes_write, total_transfers_write; long double busy_pct, busy_time; u_int64_t queue_len; long double total_mb, blocks_per_second, total_duration; long double ms_per_other, ms_per_read, ms_per_write, ms_per_transaction; int firstline = 1; - char *devname; + char *devicename; if (xflag > 0) { printf(" extended device statistics "); if (Tflag > 0) printf(" tty "); if (Cflag > 0) printf(" cpu "); printf("\n"); if (Iflag == 0) { printf("device r/s w/s kr/s kw/s " " ms/r ms/w ms/o ms/t qlen %%b "); } else { printf("device r/i w/i kr/i" " kw/i qlen tsvc_t/i sb/i "); } if (Tflag > 0) printf("tin tout "); if (Cflag > 0) printf("us ni sy in id "); printf("\n"); } for (dn = 0; dn < num_devices; dn++) { int di; if (((perf_select == 0) && (dev_select[dn].selected == 0)) || (dev_select[dn].selected > maxshowdevs)) continue; di = dev_select[dn].position; if (devstat_compute_statistics(&cur.dinfo->devices[di], havelast ? 
&last.dinfo->devices[di] : NULL, etime, DSM_TOTAL_BYTES, &total_bytes, DSM_TOTAL_BYTES_READ, &total_bytes_read, DSM_TOTAL_BYTES_WRITE, &total_bytes_write, DSM_TOTAL_TRANSFERS, &total_transfers, DSM_TOTAL_TRANSFERS_READ, &total_transfers_read, DSM_TOTAL_TRANSFERS_WRITE, &total_transfers_write, DSM_TOTAL_BLOCKS, &total_blocks, DSM_KB_PER_TRANSFER, &kb_per_transfer, DSM_TRANSFERS_PER_SECOND, &transfers_per_second, DSM_TRANSFERS_PER_SECOND_READ, &transfers_per_second_read, DSM_TRANSFERS_PER_SECOND_WRITE, &transfers_per_second_write, DSM_MB_PER_SECOND, &mb_per_second, DSM_MB_PER_SECOND_READ, &mb_per_second_read, DSM_MB_PER_SECOND_WRITE, &mb_per_second_write, DSM_BLOCKS_PER_SECOND, &blocks_per_second, DSM_MS_PER_TRANSACTION, &ms_per_transaction, DSM_MS_PER_TRANSACTION_READ, &ms_per_read, DSM_MS_PER_TRANSACTION_WRITE, &ms_per_write, DSM_MS_PER_TRANSACTION_OTHER, &ms_per_other, DSM_BUSY_PCT, &busy_pct, DSM_QUEUE_LENGTH, &queue_len, DSM_TOTAL_DURATION, &total_duration, DSM_TOTAL_BUSY_TIME, &busy_time, DSM_NONE) != 0) errx(1, "%s", devstat_errbuf); if (perf_select != 0) { dev_select[dn].bytes = total_bytes; if ((dev_select[dn].selected == 0) || (dev_select[dn].selected > maxshowdevs)) continue; } if (Kflag > 0 || xflag > 0) { int block_size = cur.dinfo->devices[di].block_size; total_blocks = total_blocks * (block_size ? block_size : 512) / 1024; } if (xflag > 0) { - if (asprintf(&devname, "%s%d", + if (asprintf(&devicename, "%s%d", cur.dinfo->devices[di].device_name, cur.dinfo->devices[di].unit_number) == -1) err(1, "asprintf"); /* * If zflag is set, skip any devices with zero I/O. */ if (zflag == 0 || transfers_per_second_read > 0.05 || transfers_per_second_write > 0.05 || mb_per_second_read > ((long double).0005)/1024 || mb_per_second_write > ((long double).0005)/1024 || busy_pct > 0.5) { if (Iflag == 0) printf("%-8.8s %5d %5d %8.1Lf " "%8.1Lf %5d %5d %5d %5d " "%4" PRIu64 " %3.0Lf ", - devname, + devicename, (int)transfers_per_second_read, (int)transfers_per_second_write, mb_per_second_read * 1024, mb_per_second_write * 1024, (int)ms_per_read, (int)ms_per_write, (int)ms_per_other, (int)ms_per_transaction, queue_len, busy_pct); else printf("%-8.8s %11.1Lf %11.1Lf " "%12.1Lf %12.1Lf %4" PRIu64 " %10.1Lf %9.1Lf ", - devname, + devicename, (long double)total_transfers_read, (long double)total_transfers_write, (long double) total_bytes_read / 1024, (long double) total_bytes_write / 1024, queue_len, total_duration, busy_time); if (firstline) { /* * If this is the first device * we're printing, also print * CPU or TTY stats if requested. */ firstline = 0; if (Tflag > 0) printf("%4.0Lf%5.0Lf", cur.tk_nin / etime, cur.tk_nout / etime); if (Cflag > 0) cpustats(); } printf("\n"); } - free(devname); + free(devicename); } else if (oflag > 0) { int msdig = (ms_per_transaction < 100.0) ? 1 : 0; if (Iflag == 0) printf("%4.0Lf%4.0Lf%5.*Lf ", blocks_per_second, transfers_per_second, msdig, ms_per_transaction); else printf("%4.1" PRIu64 "%4.1" PRIu64 "%5.*Lf ", total_blocks, total_transfers, msdig, ms_per_transaction); } else { if (Iflag == 0) printf(" %5.2Lf %3.0Lf %5.2Lf ", kb_per_transfer, transfers_per_second, mb_per_second); else { total_mb = total_bytes; total_mb /= 1024 * 1024; printf(" %5.2Lf %3.1" PRIu64 " %5.2Lf ", kb_per_transfer, total_transfers, total_mb); } } } if (xflag > 0 && zflag > 0 && firstline == 1 && (Tflag > 0 || Cflag > 0)) { /* * If zflag is set and we did not print any device * lines I/O because they were all zero, * print TTY/CPU stats. 
*/ printf("%52s",""); if (Tflag > 0) printf("%4.0Lf %5.0Lf", cur.tk_nin / etime, cur.tk_nout / etime); if (Cflag > 0) cpustats(); printf("\n"); } } static void cpustats(void) { int state; - double time; + double cptime; - time = 0.0; + cptime = 0.0; for (state = 0; state < CPUSTATES; ++state) - time += cur.cp_time[state]; + cptime += cur.cp_time[state]; for (state = 0; state < CPUSTATES; ++state) printf(" %2.0f", - rint(100. * cur.cp_time[state] / (time ? time : 1))); + rint(100. * cur.cp_time[state] / (cptime ? cptime : 1))); } static int readvar(kvm_t *kd, const char *name, int nlid, void *ptr, size_t len) { if (kd != NULL) { ssize_t nbytes; nbytes = kvm_read(kd, namelist[nlid].n_value, ptr, len); if (nbytes < 0) { warnx("kvm_read(%s): %s", namelist[nlid].n_name, kvm_geterr(kd)); return (1); } else if ((size_t)nbytes != len) { warnx("kvm_read(%s): expected %zu bytes, got %zd bytes", namelist[nlid].n_name, len, nbytes); return (1); } } else { size_t nlen = len; if (sysctlbyname(name, ptr, &nlen, NULL, 0) == -1) { warn("sysctl(%s...) failed", name); return (1); } if (nlen != len) { warnx("sysctl(%s...): expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); return (1); } } return (0); } Index: projects/release-pkg/usr.sbin/pwd_mkdb/pwd_mkdb.c =================================================================== --- projects/release-pkg/usr.sbin/pwd_mkdb/pwd_mkdb.c (revision 295925) +++ projects/release-pkg/usr.sbin/pwd_mkdb/pwd_mkdb.c (revision 295926) @@ -1,784 +1,784 @@ /*- * Copyright (c) 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1991, 1993, 1994\n\ The Regents of the University of California. 
All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)pwd_mkdb.c 8.5 (Berkeley) 4/20/94"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pw_scan.h" #define INSECURE 1 #define SECURE 2 #define PERM_INSECURE (S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH) #define PERM_SECURE (S_IRUSR|S_IWUSR) #define LEGACY_VERSION(x) _PW_VERSIONED(x, 3) #define CURRENT_VERSION(x) _PW_VERSIONED(x, 4) static HASHINFO openinfo = { 4096, /* bsize */ 32, /* ffactor */ 256, /* nelem */ 2048 * 1024, /* cachesize */ NULL, /* hash() */ BYTE_ORDER /* lorder */ }; static enum state { FILE_INSECURE, FILE_SECURE, FILE_ORIG } clean; static struct passwd pwd; /* password structure */ static char *pname; /* password file name */ static char prefix[MAXPATHLEN]; static int is_comment; /* flag for comments */ static char line[LINE_MAX]; void cleanup(void); void error(const char *); void cp(char *, char *, mode_t mode); void mv(char *, char *); int scan(FILE *, struct passwd *); static void usage(void); int main(int argc, char *argv[]) { static char verskey[] = _PWD_VERSION_KEY; char version = _PWD_CURRENT_VERSION; DB *dp, *sdp, *pw_db; DBT data, sdata, key; FILE *fp, *oldfp; sigset_t set; int ch, cnt, ypcnt, makeold, tfd, yp_enabled = 0; unsigned int len; uint32_t store; const char *t; char *p; char buf[MAX(MAXPATHLEN, LINE_MAX * 2)], tbuf[1024]; char sbuf[MAX(MAXPATHLEN, LINE_MAX * 2)]; char buf2[MAXPATHLEN]; char sbuf2[MAXPATHLEN]; char *username; u_int method, methoduid; int Cflag, dflag, iflag, lflag; int nblock = 0; iflag = dflag = Cflag = lflag = 0; strcpy(prefix, _PATH_PWD); makeold = 0; username = NULL; oldfp = NULL; while ((ch = getopt(argc, argv, "BCLlNd:ips:u:v")) != -1) switch(ch) { case 'B': /* big-endian output */ openinfo.lorder = BIG_ENDIAN; break; case 'C': /* verify only */ Cflag = 1; break; case 'l': /* generate legacy entries */ lflag = 1; break; case 'L': /* little-endian output */ openinfo.lorder = LITTLE_ENDIAN; break; case 'N': /* do not wait for lock */ nblock = LOCK_NB; /* will fail if locked */ break; case 'd': dflag++; strlcpy(prefix, optarg, sizeof(prefix)); break; case 'i': iflag++; break; case 'p': /* create V7 "file.orig" */ makeold = 1; break; case 's': /* change default cachesize */ openinfo.cachesize = atoi(optarg) * 1024 * 1024; break; case 'u': /* only update this record */ username = optarg; break; case 'v': /* backward compatible */ break; default: usage(); } argc -= optind; argv += optind; if (argc != 1 || (username && (*username == '+' || *username == '-'))) usage(); /* * This could be changed to allow the user to interrupt. * Probably not worth the effort. */ sigemptyset(&set); sigaddset(&set, SIGTSTP); sigaddset(&set, SIGHUP); sigaddset(&set, SIGINT); sigaddset(&set, SIGQUIT); sigaddset(&set, SIGTERM); (void)sigprocmask(SIG_BLOCK, &set, (sigset_t *)NULL); /* We don't care what the user wants. */ (void)umask(0); pname = *argv; /* * Open and lock the original password file. We have to check * the hardlink count after we get the lock to handle any potential * unlink/rename race. * * This lock is necessary when someone runs pwd_mkdb manually, directly * on master.passwd, to handle the case where a user might try to * change his password while pwd_mkdb is running. 
*/ for (;;) { struct stat st; if (!(fp = fopen(pname, "r"))) error(pname); if (flock(fileno(fp), LOCK_EX|nblock) < 0 && !(dflag && iflag)) error("flock"); if (fstat(fileno(fp), &st) < 0) error(pname); if (st.st_nlink != 0) break; fclose(fp); fp = NULL; } /* check only if password database is valid */ if (Cflag) { while (scan(fp, &pwd)) if (!is_comment && strlen(pwd.pw_name) >= MAXLOGNAME) { warnx("%s: username too long", pwd.pw_name); exit(1); } exit(0); } /* Open the temporary insecure password database. */ (void)snprintf(buf, sizeof(buf), "%s/%s.tmp", prefix, _MP_DB); (void)snprintf(sbuf, sizeof(sbuf), "%s/%s.tmp", prefix, _SMP_DB); if (username) { int use_version; (void)snprintf(buf2, sizeof(buf2), "%s/%s", prefix, _MP_DB); (void)snprintf(sbuf2, sizeof(sbuf2), "%s/%s", prefix, _SMP_DB); clean = FILE_INSECURE; cp(buf2, buf, PERM_INSECURE); dp = dbopen(buf, - O_RDWR|O_EXCL|O_SYNC, PERM_INSECURE, DB_HASH, &openinfo); + O_RDWR|O_EXCL, PERM_INSECURE, DB_HASH, &openinfo); if (dp == NULL) error(buf); clean = FILE_SECURE; cp(sbuf2, sbuf, PERM_SECURE); sdp = dbopen(sbuf, - O_RDWR|O_EXCL|O_SYNC, PERM_SECURE, DB_HASH, &openinfo); + O_RDWR|O_EXCL, PERM_SECURE, DB_HASH, &openinfo); if (sdp == NULL) error(sbuf); /* * Do some trouble to check if we should store this users * uid. Don't use getpwnam/getpwuid as that interferes * with NIS. */ pw_db = dbopen(_PATH_MP_DB, O_RDONLY, 0, DB_HASH, NULL); if (!pw_db) error(_MP_DB); key.data = verskey; key.size = sizeof(verskey)-1; if ((pw_db->get)(pw_db, &key, &data, 0) == 0) use_version = *(unsigned char *)data.data; else use_version = 3; buf[0] = _PW_VERSIONED(_PW_KEYBYNAME, use_version); len = strlen(username); /* Only check that username fits in buffer */ memmove(buf + 1, username, MIN(len, sizeof(buf) - 1)); key.data = (u_char *)buf; key.size = len + 1; if ((pw_db->get)(pw_db, &key, &data, 0) == 0) { p = (char *)data.data; /* jump over pw_name and pw_passwd, to get to pw_uid */ while (*p++) ; while (*p++) ; buf[0] = _PW_VERSIONED(_PW_KEYBYUID, use_version); memmove(buf + 1, p, sizeof(store)); key.data = (u_char *)buf; key.size = sizeof(store) + 1; if ((pw_db->get)(pw_db, &key, &data, 0) == 0) { /* First field of data.data holds pw_pwname */ if (!strcmp(data.data, username)) methoduid = 0; else methoduid = R_NOOVERWRITE; } else { methoduid = R_NOOVERWRITE; } } else { methoduid = R_NOOVERWRITE; } if ((pw_db->close)(pw_db)) error("close pw_db"); method = 0; } else { dp = dbopen(buf, - O_RDWR|O_CREAT|O_EXCL|O_SYNC, PERM_INSECURE, DB_HASH, &openinfo); + O_RDWR|O_CREAT|O_EXCL, PERM_INSECURE, DB_HASH, &openinfo); if (dp == NULL) error(buf); clean = FILE_INSECURE; sdp = dbopen(sbuf, - O_RDWR|O_CREAT|O_EXCL|O_SYNC, PERM_SECURE, DB_HASH, &openinfo); + O_RDWR|O_CREAT|O_EXCL, PERM_SECURE, DB_HASH, &openinfo); if (sdp == NULL) error(sbuf); clean = FILE_SECURE; method = R_NOOVERWRITE; methoduid = R_NOOVERWRITE; } /* * Open file for old password file. Minor trickiness -- don't want to * chance the file already existing, since someone (stupidly) might * still be using this for permission checking. So, open it first and * fdopen the resulting fd. The resulting file should be readable by * everyone. */ if (makeold) { (void)snprintf(buf, sizeof(buf), "%s.orig", pname); if ((tfd = open(buf, O_WRONLY|O_CREAT|O_EXCL, PERM_INSECURE)) < 0) error(buf); if ((oldfp = fdopen(tfd, "w")) == NULL) error(buf); clean = FILE_ORIG; } /* * The databases actually contain three copies of the original data. 
* Each password file entry is converted into a rough approximation * of a ``struct passwd'', with the strings placed inline. This * object is then stored as the data for three separate keys. The * first key * is the pw_name field prepended by the _PW_KEYBYNAME * character. The second key is the pw_uid field prepended by the * _PW_KEYBYUID character. The third key is the line number in the * original file prepended by the _PW_KEYBYNUM character. (The special * characters are prepended to ensure that the keys do not collide.) */ /* In order to transition this file into a machine-independent * form, we have to change the format of entries. However, since * older binaries will still expect the old MD format entries, we * create those as usual and use versioned tags for the new entries. */ if (username == NULL) { /* Do not add the VERSION tag when updating a single * user. When operating on `old format' databases, this * would result in applications `seeing' only the updated * entries. */ key.data = verskey; key.size = sizeof(verskey)-1; data.data = &version; data.size = 1; if ((dp->put)(dp, &key, &data, 0) == -1) error("put"); if ((sdp->put)(sdp, &key, &data, 0) == -1) error("put"); } ypcnt = 0; data.data = (u_char *)buf; sdata.data = (u_char *)sbuf; key.data = (u_char *)tbuf; for (cnt = 1; scan(fp, &pwd); ++cnt) { if (!is_comment && (pwd.pw_name[0] == '+' || pwd.pw_name[0] == '-')) { yp_enabled = 1; ypcnt++; } if (is_comment) --cnt; #define COMPACT(e) t = e; while ((*p++ = *t++)); #define SCALAR(e) store = htonl((uint32_t)(e)); \ memmove(p, &store, sizeof(store)); \ p += sizeof(store); #define LSCALAR(e) store = HTOL((uint32_t)(e)); \ memmove(p, &store, sizeof(store)); \ p += sizeof(store); #define HTOL(e) (openinfo.lorder == BYTE_ORDER ? \ (uint32_t)(e) : \ bswap32((uint32_t)(e))) if (!is_comment && (!username || (strcmp(username, pwd.pw_name) == 0))) { /* Create insecure data. */ p = buf; COMPACT(pwd.pw_name); COMPACT("*"); SCALAR(pwd.pw_uid); SCALAR(pwd.pw_gid); SCALAR(pwd.pw_change); COMPACT(pwd.pw_class); COMPACT(pwd.pw_gecos); COMPACT(pwd.pw_dir); COMPACT(pwd.pw_shell); SCALAR(pwd.pw_expire); SCALAR(pwd.pw_fields); data.size = p - buf; /* Create secure data. */ p = sbuf; COMPACT(pwd.pw_name); COMPACT(pwd.pw_passwd); SCALAR(pwd.pw_uid); SCALAR(pwd.pw_gid); SCALAR(pwd.pw_change); COMPACT(pwd.pw_class); COMPACT(pwd.pw_gecos); COMPACT(pwd.pw_dir); COMPACT(pwd.pw_shell); SCALAR(pwd.pw_expire); SCALAR(pwd.pw_fields); sdata.size = p - sbuf; /* Store insecure by name. */ tbuf[0] = CURRENT_VERSION(_PW_KEYBYNAME); len = strlen(pwd.pw_name); memmove(tbuf + 1, pwd.pw_name, len); key.size = len + 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); /* Store insecure by number. */ tbuf[0] = CURRENT_VERSION(_PW_KEYBYNUM); store = htonl(cnt); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); /* Store insecure by uid. */ tbuf[0] = CURRENT_VERSION(_PW_KEYBYUID); store = htonl(pwd.pw_uid); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((dp->put)(dp, &key, &data, methoduid) == -1) error("put"); /* Store secure by name. */ tbuf[0] = CURRENT_VERSION(_PW_KEYBYNAME); len = strlen(pwd.pw_name); memmove(tbuf + 1, pwd.pw_name, len); key.size = len + 1; if ((sdp->put)(sdp, &key, &sdata, method) == -1) error("put"); /* Store secure by number. 
*/ tbuf[0] = CURRENT_VERSION(_PW_KEYBYNUM); store = htonl(cnt); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((sdp->put)(sdp, &key, &sdata, method) == -1) error("put"); /* Store secure by uid. */ tbuf[0] = CURRENT_VERSION(_PW_KEYBYUID); store = htonl(pwd.pw_uid); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((sdp->put)(sdp, &key, &sdata, methoduid) == -1) error("put"); /* Store insecure and secure special plus and special minus */ if (pwd.pw_name[0] == '+' || pwd.pw_name[0] == '-') { tbuf[0] = CURRENT_VERSION(_PW_KEYYPBYNUM); store = htonl(ypcnt); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); if ((sdp->put)(sdp, &key, &sdata, method) == -1) error("put"); } if (lflag) { /* Create insecure data. (legacy version) */ p = buf; COMPACT(pwd.pw_name); COMPACT("*"); LSCALAR(pwd.pw_uid); LSCALAR(pwd.pw_gid); LSCALAR(pwd.pw_change); COMPACT(pwd.pw_class); COMPACT(pwd.pw_gecos); COMPACT(pwd.pw_dir); COMPACT(pwd.pw_shell); LSCALAR(pwd.pw_expire); LSCALAR(pwd.pw_fields); data.size = p - buf; /* Create secure data. (legacy version) */ p = sbuf; COMPACT(pwd.pw_name); COMPACT(pwd.pw_passwd); LSCALAR(pwd.pw_uid); LSCALAR(pwd.pw_gid); LSCALAR(pwd.pw_change); COMPACT(pwd.pw_class); COMPACT(pwd.pw_gecos); COMPACT(pwd.pw_dir); COMPACT(pwd.pw_shell); LSCALAR(pwd.pw_expire); LSCALAR(pwd.pw_fields); sdata.size = p - sbuf; /* Store insecure by name. */ tbuf[0] = LEGACY_VERSION(_PW_KEYBYNAME); len = strlen(pwd.pw_name); memmove(tbuf + 1, pwd.pw_name, len); key.size = len + 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); /* Store insecure by number. */ tbuf[0] = LEGACY_VERSION(_PW_KEYBYNUM); store = HTOL(cnt); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); /* Store insecure by uid. */ tbuf[0] = LEGACY_VERSION(_PW_KEYBYUID); store = HTOL(pwd.pw_uid); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((dp->put)(dp, &key, &data, methoduid) == -1) error("put"); /* Store secure by name. */ tbuf[0] = LEGACY_VERSION(_PW_KEYBYNAME); len = strlen(pwd.pw_name); memmove(tbuf + 1, pwd.pw_name, len); key.size = len + 1; if ((sdp->put)(sdp, &key, &sdata, method) == -1) error("put"); /* Store secure by number. */ tbuf[0] = LEGACY_VERSION(_PW_KEYBYNUM); store = HTOL(cnt); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((sdp->put)(sdp, &key, &sdata, method) == -1) error("put"); /* Store secure by uid. 
*/ tbuf[0] = LEGACY_VERSION(_PW_KEYBYUID); store = HTOL(pwd.pw_uid); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((sdp->put)(sdp, &key, &sdata, methoduid) == -1) error("put"); /* Store insecure and secure special plus and special minus */ if (pwd.pw_name[0] == '+' || pwd.pw_name[0] == '-') { tbuf[0] = LEGACY_VERSION(_PW_KEYYPBYNUM); store = HTOL(ypcnt); memmove(tbuf + 1, &store, sizeof(store)); key.size = sizeof(store) + 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); if ((sdp->put)(sdp, &key, &sdata, method) == -1) error("put"); } } } /* Create original format password file entry */ if (is_comment && makeold){ /* copy comments */ if (fprintf(oldfp, "%s\n", line) < 0) error("write old"); } else if (makeold) { char uidstr[20]; char gidstr[20]; snprintf(uidstr, sizeof(uidstr), "%u", pwd.pw_uid); snprintf(gidstr, sizeof(gidstr), "%u", pwd.pw_gid); if (fprintf(oldfp, "%s:*:%s:%s:%s:%s:%s\n", pwd.pw_name, pwd.pw_fields & _PWF_UID ? uidstr : "", pwd.pw_fields & _PWF_GID ? gidstr : "", pwd.pw_gecos, pwd.pw_dir, pwd.pw_shell) < 0) error("write old"); } } /* If YP enabled, set flag. */ if (yp_enabled) { buf[0] = yp_enabled + 2; data.size = 1; key.size = 1; tbuf[0] = CURRENT_VERSION(_PW_KEYYPENABLED); if ((dp->put)(dp, &key, &data, method) == -1) error("put"); if ((sdp->put)(sdp, &key, &data, method) == -1) error("put"); if (lflag) { tbuf[0] = LEGACY_VERSION(_PW_KEYYPENABLED); key.size = 1; if ((dp->put)(dp, &key, &data, method) == -1) error("put"); if ((sdp->put)(sdp, &key, &data, method) == -1) error("put"); } } if ((dp->close)(dp) == -1) error("close"); if ((sdp->close)(sdp) == -1) error("close"); if (makeold) { (void)fflush(oldfp); if (fclose(oldfp) == EOF) error("close old"); } /* Set master.passwd permissions, in case caller forgot. */ (void)fchmod(fileno(fp), S_IRUSR|S_IWUSR); /* Install as the real password files. */ (void)snprintf(buf, sizeof(buf), "%s/%s.tmp", prefix, _MP_DB); (void)snprintf(buf2, sizeof(buf2), "%s/%s", prefix, _MP_DB); mv(buf, buf2); (void)snprintf(buf, sizeof(buf), "%s/%s.tmp", prefix, _SMP_DB); (void)snprintf(buf2, sizeof(buf2), "%s/%s", prefix, _SMP_DB); mv(buf, buf2); if (makeold) { (void)snprintf(buf2, sizeof(buf2), "%s/%s", prefix, _PASSWD); (void)snprintf(buf, sizeof(buf), "%s.orig", pname); mv(buf, buf2); } /* * Move the master password LAST -- chpass(1), passwd(1) and vipw(8) * all use flock(2) on it to block other incarnations of themselves. * The rename means that everything is unlocked, as the original file * can no longer be accessed. */ (void)snprintf(buf, sizeof(buf), "%s/%s", prefix, _MASTERPASSWD); mv(pname, buf); /* * Close locked password file after rename() */ if (fclose(fp) == EOF) error("close fp"); exit(0); } int scan(FILE *fp, struct passwd *pw) { static int lcnt; size_t len; char *p; p = fgetln(fp, &len); if (p == NULL) return (0); ++lcnt; /* * ``... 
if I swallow anything evil, put your fingers down my * throat...'' * -- The Who */ if (len > 0 && p[len - 1] == '\n') len--; if (len >= sizeof(line) - 1) { warnx("line #%d too long", lcnt); goto fmt; } memcpy(line, p, len); line[len] = '\0'; /* * Ignore comments: ^[ \t]*# */ for (p = line; *p != '\0'; p++) if (*p != ' ' && *p != '\t') break; if (*p == '#' || *p == '\0') { is_comment = 1; return(1); } else is_comment = 0; if (!__pw_scan(line, pw, _PWSCAN_WARN|_PWSCAN_MASTER)) { warnx("at line #%d", lcnt); fmt: errno = EFTYPE; /* XXX */ error(pname); } return (1); } void cp(char *from, char *to, mode_t mode) { static char buf[MAXBSIZE]; int from_fd, rcount, to_fd, wcount; if ((from_fd = open(from, O_RDONLY, 0)) < 0) error(from); if ((to_fd = open(to, O_WRONLY|O_CREAT|O_EXCL, mode)) < 0) error(to); while ((rcount = read(from_fd, buf, MAXBSIZE)) > 0) { wcount = write(to_fd, buf, rcount); if (rcount != wcount || wcount == -1) { int sverrno = errno; (void)snprintf(buf, sizeof(buf), "%s to %s", from, to); errno = sverrno; error(buf); } } if (rcount < 0) { int sverrno = errno; (void)snprintf(buf, sizeof(buf), "%s to %s", from, to); errno = sverrno; error(buf); } } void mv(char *from, char *to) { char buf[MAXPATHLEN]; char *to_dir; int to_dir_fd = -1; /* * Make sure file is safe on disk. To improve performance we will call * fsync() to the directory where file lies */ if (rename(from, to) != 0 || (to_dir = dirname(to)) == NULL || (to_dir_fd = open(to_dir, O_RDONLY|O_DIRECTORY)) == -1 || fsync(to_dir_fd) != 0) { int sverrno = errno; (void)snprintf(buf, sizeof(buf), "%s to %s", from, to); errno = sverrno; if (to_dir_fd != -1) close(to_dir_fd); error(buf); } if (to_dir_fd != -1) close(to_dir_fd); } void error(const char *name) { warn("%s", name); cleanup(); exit(1); } void cleanup(void) { char buf[MAXPATHLEN]; switch(clean) { case FILE_ORIG: (void)snprintf(buf, sizeof(buf), "%s.orig", pname); (void)unlink(buf); /* FALLTHROUGH */ case FILE_SECURE: (void)snprintf(buf, sizeof(buf), "%s/%s.tmp", prefix, _SMP_DB); (void)unlink(buf); /* FALLTHROUGH */ case FILE_INSECURE: (void)snprintf(buf, sizeof(buf), "%s/%s.tmp", prefix, _MP_DB); (void)unlink(buf); } } static void usage(void) { (void)fprintf(stderr, "usage: pwd_mkdb [-BCiLNp] [-d directory] [-s cachesize] [-u username] file\n"); exit(1); } Index: projects/release-pkg =================================================================== --- projects/release-pkg (revision 295925) +++ projects/release-pkg (revision 295926) Property changes on: projects/release-pkg ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r295886-295925
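
The sleep in the iostat main loop above uses the standard sigprocmask(2)/sigsuspend(2) idiom: block SIGALRM and SIGINT, test the wake-up flags, and only then sleep, so a signal delivered between the test and the sleep cannot be lost. The following is a minimal, self-contained sketch of that pattern, not the iostat code itself; the handler and flag names (on_alarm, on_intr, tick, interrupted) are illustrative.

#include <sys/time.h>

#include <signal.h>
#include <stdio.h>

/* Wake-up flags set from signal handlers (illustrative names). */
static volatile sig_atomic_t tick;		/* set by SIGALRM */
static volatile sig_atomic_t interrupted;	/* set by SIGINT */

static void
on_alarm(int signo)
{
	(void)signo;
	tick = 1;
}

static void
on_intr(int signo)
{
	(void)signo;
	interrupted = 1;
}

int
main(void)
{
	struct itimerval it;
	sigset_t mask, omask;

	(void)signal(SIGALRM, on_alarm);
	(void)signal(SIGINT, on_intr);

	/* Fire SIGALRM once a second, in the spirit of iostat's wait timer. */
	it.it_interval.tv_sec = 1;
	it.it_interval.tv_usec = 0;
	it.it_value = it.it_interval;
	(void)setitimer(ITIMER_REAL, &it, NULL);

	sigemptyset(&mask);
	sigaddset(&mask, SIGALRM);
	sigaddset(&mask, SIGINT);

	for (;;) {
		tick = 0;
		/*
		 * Block both signals, then re-test the flags.  sigsuspend()
		 * atomically restores the old mask and sleeps, so a signal
		 * that arrives after the test cannot be missed.
		 */
		(void)sigprocmask(SIG_BLOCK, &mask, &omask);
		while (!(tick || interrupted))
			sigsuspend(&omask);
		(void)sigprocmask(SIG_SETMASK, &omask, NULL);

		if (interrupted)
			break;
		printf("tick\n");
		(void)fflush(stdout);
	}
	printf("done\n");
	return (0);
}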
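
Likewise, the mv() helper in pwd_mkdb above pairs rename(2) with an fsync(2) of the destination directory: the rename itself is atomic, but only syncing the directory guarantees the new entry has reached stable storage. Below is a stand-alone sketch of that pattern, assuming a FreeBSD-style libc (strlcpy, err); durable_rename() and the file names used in main() are hypothetical, not taken from pwd_mkdb.

#include <err.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Rename "from" to "to", then fsync() the directory that now holds "to",
 * so the new directory entry is on disk before the caller moves on.
 */
static void
durable_rename(const char *from, const char *to)
{
	char dirbuf[PATH_MAX];
	int dfd;

	if (rename(from, to) != 0)
		err(1, "rename %s to %s", from, to);

	/* dirname(3) may modify its argument, so work on a copy. */
	if (strlcpy(dirbuf, to, sizeof(dirbuf)) >= sizeof(dirbuf))
		errx(1, "%s: path too long", to);
	if ((dfd = open(dirname(dirbuf), O_RDONLY | O_DIRECTORY)) == -1)
		err(1, "open %s", dirbuf);
	if (fsync(dfd) != 0)
		err(1, "fsync %s", dirbuf);
	(void)close(dfd);
}

int
main(void)
{
	/* Hypothetical file names, standing in for the pwd.db temp files. */
	durable_rename("pwd.db.tmp", "pwd.db");
	return (0);
}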