Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Thu, 6 Apr 2017 14:25:07 +0000 (07:25 -0700)
committer David S. Miller <davem@davemloft.net>
Thu, 6 Apr 2017 15:24:51 +0000 (08:24 -0700)
Mostly simple cases of overlapping changes (for example, code added
nearby, or a function whose name changed).

Signed-off-by: David S. Miller <davem@davemloft.net>
26 files changed:
MAINTAINERS
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/phy/phy.c
drivers/net/usb/r8152.c
include/linux/hyperv.h
include/net/sctp/structs.h
kernel/bpf/verifier.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/secure_seq.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/tcp_input.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nfnetlink_cttimeout.c
net/sctp/outqueue.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/stream.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_verifier.c

diff --cc MAINTAINERS
Simple merge
diff --cc drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 61e5741f935c212e3104a202555c91b965047b17,a41377e26c07d038aa6c8716ef52d7152e92bcf0..e2197160e4dcc70fe626f6f06c4d20d70925ebc7
@@@ -3295,9 -3275,10 +3295,10 @@@ void nfp_net_netdev_clean(struct net_de
  {
        struct nfp_net *nn = netdev_priv(netdev);
  
 -      unregister_netdev(nn->netdev);
++      unregister_netdev(nn->dp.netdev);
 -      if (nn->xdp_prog)
 -              bpf_prog_put(nn->xdp_prog);
 -      if (nn->bpf_offload_xdp)
 +      if (nn->dp.xdp_prog)
 +              bpf_prog_put(nn->dp.xdp_prog);
 +      if (nn->dp.bpf_offload_xdp)
                nfp_net_xdp_offload(nn, NULL);
-       unregister_netdev(nn->dp.netdev);
  }
Simple merge
diff --cc drivers/net/usb/r8152.c
index 4deced102f729475dd78a013fa897a24fb5bc81f,07f788c49d573fe9d4dc15e24b8f29449b4ecbe2..ddc62cb69be828a730e6ed32ecc9ee951fef8d3b
@@@ -3701,19 -3699,27 +3706,27 @@@ static int rtl8152_resume(struct usb_in
        if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                tp->rtl_ops.init(tp);
                queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
 -              netif_device_attach(tp->netdev);
 +              netif_device_attach(netdev);
        }
  
 -      if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
 +      if (netif_running(netdev) && netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 +                      struct napi_struct *napi = &tp->napi;
 +
                        tp->rtl_ops.autosuspend_en(tp, false);
 -                      napi_disable(&tp->napi);
 +                      napi_disable(napi);
                        set_bit(WORK_ENABLE, &tp->flags);
-                       if (netif_carrier_ok(netdev))
-                               rtl_start_rx(tp);
 -
 -                      if (netif_carrier_ok(tp->netdev)) {
++                      if (netif_carrier_ok(netdev)) {
+                               if (rtl8152_get_speed(tp) & LINK_STATUS) {
+                                       rtl_start_rx(tp);
+                               } else {
 -                                      netif_carrier_off(tp->netdev);
++                                      netif_carrier_off(netdev);
+                                       tp->rtl_ops.disable(tp);
 -                                      netif_info(tp, link, tp->netdev,
++                                      netif_info(tp, link, netdev,
+                                                  "linking down\n");
+                               }
+                       }
 -
 -                      napi_enable(&tp->napi);
 +                      napi_enable(napi);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        smp_mb__after_atomic();
                        if (!list_empty(&tp->rx_done))
Simple merge
Simple merge
Simple merge
diff --cc net/core/flow_dissector.c
index 5f3ae922fcd1d31580e1c3735d9b139d40212090,d98d4998213da6103665d62d5a85613631236f19..c9cf425303f84b6b5c3a12876d68435a531b6b30
@@@ -113,216 -113,6 +113,216 @@@ __be32 __skb_flow_get_ports(const struc
  }
  EXPORT_SYMBOL(__skb_flow_get_ports);
  
-       struct arphdr *_arp;
 +enum flow_dissect_ret {
 +      FLOW_DISSECT_RET_OUT_GOOD,
 +      FLOW_DISSECT_RET_OUT_BAD,
 +      FLOW_DISSECT_RET_OUT_PROTO_AGAIN,
 +};
 +
 +static enum flow_dissect_ret
 +__skb_flow_dissect_mpls(const struct sk_buff *skb,
 +                      struct flow_dissector *flow_dissector,
 +                      void *target_container, void *data, int nhoff, int hlen)
 +{
 +      struct flow_dissector_key_keyid *key_keyid;
 +      struct mpls_label *hdr, _hdr[2];
 +
 +      if (!dissector_uses_key(flow_dissector,
 +                              FLOW_DISSECTOR_KEY_MPLS_ENTROPY))
 +              return FLOW_DISSECT_RET_OUT_GOOD;
 +
 +      hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
 +                                 hlen, &_hdr);
 +      if (!hdr)
 +              return FLOW_DISSECT_RET_OUT_BAD;
 +
 +      if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
 +          MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
 +              key_keyid = skb_flow_dissector_target(flow_dissector,
 +                                                    FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
 +                                                    target_container);
 +              key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
 +      }
 +      return FLOW_DISSECT_RET_OUT_GOOD;
 +}
 +
 +static enum flow_dissect_ret
 +__skb_flow_dissect_arp(const struct sk_buff *skb,
 +                     struct flow_dissector *flow_dissector,
 +                     void *target_container, void *data, int nhoff, int hlen)
 +{
 +      struct flow_dissector_key_arp *key_arp;
 +      struct {
 +              unsigned char ar_sha[ETH_ALEN];
 +              unsigned char ar_sip[4];
 +              unsigned char ar_tha[ETH_ALEN];
 +              unsigned char ar_tip[4];
 +      } *arp_eth, _arp_eth;
 +      const struct arphdr *arp;
++      struct arphdr _arp;
 +
 +      if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
 +              return FLOW_DISSECT_RET_OUT_GOOD;
 +
 +      arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
 +                                 hlen, &_arp);
 +      if (!arp)
 +              return FLOW_DISSECT_RET_OUT_BAD;
 +
 +      if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
 +          arp->ar_pro != htons(ETH_P_IP) ||
 +          arp->ar_hln != ETH_ALEN ||
 +          arp->ar_pln != 4 ||
 +          (arp->ar_op != htons(ARPOP_REPLY) &&
 +           arp->ar_op != htons(ARPOP_REQUEST)))
 +              return FLOW_DISSECT_RET_OUT_BAD;
 +
 +      arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
 +                                     sizeof(_arp_eth), data,
 +                                     hlen, &_arp_eth);
 +      if (!arp_eth)
 +              return FLOW_DISSECT_RET_OUT_BAD;
 +
 +      key_arp = skb_flow_dissector_target(flow_dissector,
 +                                          FLOW_DISSECTOR_KEY_ARP,
 +                                          target_container);
 +
 +      memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
 +      memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
 +
 +      /* Only store the lower byte of the opcode;
 +       * this covers ARPOP_REPLY and ARPOP_REQUEST.
 +       */
 +      key_arp->op = ntohs(arp->ar_op) & 0xff;
 +
 +      ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
 +      ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
 +
 +      return FLOW_DISSECT_RET_OUT_GOOD;
 +}
 +
 +static enum flow_dissect_ret
 +__skb_flow_dissect_gre(const struct sk_buff *skb,
 +                     struct flow_dissector_key_control *key_control,
 +                     struct flow_dissector *flow_dissector,
 +                     void *target_container, void *data,
 +                     __be16 *p_proto, int *p_nhoff, int *p_hlen,
 +                     unsigned int flags)
 +{
 +      struct flow_dissector_key_keyid *key_keyid;
 +      struct gre_base_hdr *hdr, _hdr;
 +      int offset = 0;
 +      u16 gre_ver;
 +
 +      hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
 +                                 data, *p_hlen, &_hdr);
 +      if (!hdr)
 +              return FLOW_DISSECT_RET_OUT_BAD;
 +
 +      /* Only look inside GRE without routing */
 +      if (hdr->flags & GRE_ROUTING)
 +              return FLOW_DISSECT_RET_OUT_GOOD;
 +
 +      /* Only look inside GRE for version 0 and 1 */
 +      gre_ver = ntohs(hdr->flags & GRE_VERSION);
 +      if (gre_ver > 1)
 +              return FLOW_DISSECT_RET_OUT_GOOD;
 +
 +      *p_proto = hdr->protocol;
 +      if (gre_ver) {
 +              /* Version1 must be PPTP, and check the flags */
 +              if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
 +                      return FLOW_DISSECT_RET_OUT_GOOD;
 +      }
 +
 +      offset += sizeof(struct gre_base_hdr);
 +
 +      if (hdr->flags & GRE_CSUM)
 +              offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
 +                        sizeof(((struct gre_full_hdr *) 0)->reserved1);
 +
 +      if (hdr->flags & GRE_KEY) {
 +              const __be32 *keyid;
 +              __be32 _keyid;
 +
 +              keyid = __skb_header_pointer(skb, *p_nhoff + offset,
 +                                           sizeof(_keyid),
 +                                           data, *p_hlen, &_keyid);
 +              if (!keyid)
 +                      return FLOW_DISSECT_RET_OUT_BAD;
 +
 +              if (dissector_uses_key(flow_dissector,
 +                                     FLOW_DISSECTOR_KEY_GRE_KEYID)) {
 +                      key_keyid = skb_flow_dissector_target(flow_dissector,
 +                                                            FLOW_DISSECTOR_KEY_GRE_KEYID,
 +                                                            target_container);
 +                      if (gre_ver == 0)
 +                              key_keyid->keyid = *keyid;
 +                      else
 +                              key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
 +              }
 +              offset += sizeof(((struct gre_full_hdr *) 0)->key);
 +      }
 +
 +      if (hdr->flags & GRE_SEQ)
 +              offset += sizeof(((struct pptp_gre_header *) 0)->seq);
 +
 +      if (gre_ver == 0) {
 +              if (*p_proto == htons(ETH_P_TEB)) {
 +                      const struct ethhdr *eth;
 +                      struct ethhdr _eth;
 +
 +                      eth = __skb_header_pointer(skb, *p_nhoff + offset,
 +                                                 sizeof(_eth),
 +                                                 data, *p_hlen, &_eth);
 +                      if (!eth)
 +                              return FLOW_DISSECT_RET_OUT_BAD;
 +                      *p_proto = eth->h_proto;
 +                      offset += sizeof(*eth);
 +
 +                      /* Cap headers that we access via pointers at the
 +                       * end of the Ethernet header as our maximum alignment
 +                       * at that point is only 2 bytes.
 +                       */
 +                      if (NET_IP_ALIGN)
 +                              *p_hlen = *p_nhoff + offset;
 +              }
 +      } else { /* version 1, must be PPTP */
 +              u8 _ppp_hdr[PPP_HDRLEN];
 +              u8 *ppp_hdr;
 +
 +              if (hdr->flags & GRE_ACK)
 +                      offset += sizeof(((struct pptp_gre_header *) 0)->ack);
 +
 +              ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
 +                                             sizeof(_ppp_hdr),
 +                                             data, *p_hlen, _ppp_hdr);
 +              if (!ppp_hdr)
 +                      return FLOW_DISSECT_RET_OUT_BAD;
 +
 +              switch (PPP_PROTOCOL(ppp_hdr)) {
 +              case PPP_IP:
 +                      *p_proto = htons(ETH_P_IP);
 +                      break;
 +              case PPP_IPV6:
 +                      *p_proto = htons(ETH_P_IPV6);
 +                      break;
 +              default:
 +                      /* Could probably catch some more like MPLS */
 +                      break;
 +              }
 +
 +              offset += PPP_HDRLEN;
 +      }
 +
 +      *p_nhoff += offset;
 +      key_control->flags |= FLOW_DIS_ENCAPSULATION;
 +      if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
 +              return FLOW_DISSECT_RET_OUT_GOOD;
 +
 +      return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
 +}
 +
  /**
   * __skb_flow_dissect - extract the flow_keys struct and return it
   * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
Simple merge
diff --cc net/core/secure_seq.c
index fb87e78a2cc732ff5c75e5b2b0415c2fe805d990,d28da7d363f170f35d88623e2b864f04a67c3de5..6bd2f8fb0476baabf507557fc0d06b6787511c70
@@@ -45,8 -47,25 +47,25 @@@ static u32 seq_scale(u32 seq
  #endif
  
  #if IS_ENABLED(CONFIG_IPV6)
 -u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
 -                               __be16 sport, __be16 dport, u32 *tsoff)
+ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+ {
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *(struct in6_addr *)saddr,
+               .daddr = *(struct in6_addr *)daddr,
+       };
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+       return siphash(&combined, offsetofend(typeof(combined), daddr),
+                      &ts_secret);
+ }
 +u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
 +                             __be16 sport, __be16 dport, u32 *tsoff)
  {
        const struct {
                struct in6_addr saddr;
        net_secret_init();
        hash = siphash(&combined, offsetofend(typeof(combined), dport),
                       &net_secret);
-       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       *tsoff = secure_tcpv6_ts_off(saddr, daddr);
        return seq_scale(hash);
  }
 -EXPORT_SYMBOL(secure_tcpv6_sequence_number);
 +EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
  
  u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                               __be16 dport)
@@@ -88,8 -107,16 +107,16 @@@ EXPORT_SYMBOL(secure_ipv6_port_ephemera
  #endif
  
  #ifdef CONFIG_INET
+ static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+ {
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+       return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+                           &ts_secret);
+ }
  
 -/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
 +/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
   * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
   * it would be easy enough to have the former function use siphash_4u32, passing
   * the arguments as separate u32.
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc tools/testing/selftests/bpf/Makefile
index 32fb7a294f0fb01556f65338585e2a100835d425,9af09e8099c0aae9fd6acd5d42cc5afb949871e9..d8d94b9bd76c7c4fb62c8ebf50e0033de97012da
@@@ -1,12 -1,17 +1,19 @@@
  LIBDIR := ../../../lib
  BPFDIR := $(LIBDIR)/bpf
+ APIDIR := ../../../include/uapi
+ GENDIR := ../../../../include/generated
+ GENHDR := $(GENDIR)/autoconf.h
  
- CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR) -I../../../include
+ ifneq ($(wildcard $(GENHDR)),)
+   GENFLAGS := -DHAVE_GENHDR
+ endif
 -CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
 -LDLIBS += -lcap
++CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
 +LDLIBS += -lcap -lelf
  
 -TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
 +TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs
 +
 +TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o
  
  TEST_PROGS := test_kmod.sh
  
diff --cc tools/testing/selftests/bpf/test_verifier.c
index f4f43c98cf7f14951ba5a9f448cc427746549b7b,c848e90b64213128a248c9669a2a2796692c5776..0963f8ffd25c9f52a226301c747fff5db4679ff4
  
  #define MAX_INSNS     512
  #define MAX_FIXUPS    8
 +#define MAX_NR_MAPS   4
  
+ #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS    (1 << 0)
  struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
@@@ -4454,76 -4719,8 +4721,77 @@@ static struct bpf_test tests[] = 
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
                .result_unpriv = REJECT,
-       },
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 +      },
 +      {
 +              "map in map access",
 +              .insns = {
 +                      BPF_ST_MEM(0, BPF_REG_10, -4, 0),
 +                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 +                      BPF_LD_MAP_FD(BPF_REG_1, 0),
 +                      BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +                                   BPF_FUNC_map_lookup_elem),
 +                      BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
 +                      BPF_ST_MEM(0, BPF_REG_10, -4, 0),
 +                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 +                      BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 +                      BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +                                   BPF_FUNC_map_lookup_elem),
 +                      BPF_MOV64_REG(BPF_REG_0, 0),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .fixup_map_in_map = { 3 },
 +              .result = ACCEPT,
 +      },
 +      {
 +              "invalid inner map pointer",
 +              .insns = {
 +                      BPF_ST_MEM(0, BPF_REG_10, -4, 0),
 +                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 +                      BPF_LD_MAP_FD(BPF_REG_1, 0),
 +                      BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +                                   BPF_FUNC_map_lookup_elem),
 +                      BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
 +                      BPF_ST_MEM(0, BPF_REG_10, -4, 0),
 +                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 +                      BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
 +                      BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +                                   BPF_FUNC_map_lookup_elem),
 +                      BPF_MOV64_REG(BPF_REG_0, 0),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .fixup_map_in_map = { 3 },
 +              .errstr = "R1 type=inv expected=map_ptr",
 +              .errstr_unpriv = "R1 pointer arithmetic prohibited",
 +              .result = REJECT,
 +      },
 +      {
 +              "forgot null checking on the inner map pointer",
 +              .insns = {
 +                      BPF_ST_MEM(0, BPF_REG_10, -4, 0),
 +                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 +                      BPF_LD_MAP_FD(BPF_REG_1, 0),
 +                      BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +                                   BPF_FUNC_map_lookup_elem),
 +                      BPF_ST_MEM(0, BPF_REG_10, -4, 0),
 +                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 +                      BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 +                      BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +                                   BPF_FUNC_map_lookup_elem),
 +                      BPF_MOV64_REG(BPF_REG_0, 0),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .fixup_map_in_map = { 3 },
 +              .errstr = "R1 type=map_value_or_null expected=map_ptr",
 +              .result = REJECT,
+       }
  };
  
  static int probe_filter_length(const struct bpf_insn *fp)
@@@ -4635,15 -4802,10 +4904,19 @@@ static void do_test_single(struct bpf_t
        struct bpf_insn *prog = test->insns;
        int prog_len = probe_filter_length(prog);
        int prog_type = test->prog_type;
++<<<<<<< HEAD
 +      int map_fds[MAX_NR_MAPS];
 +      int fd_prog, expected_ret;
++=======
+       int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
++>>>>>>> ea6b1720ce25f92f7a17b2e0c2b653d20773d10a
        const char *expected_err;
 +      int i;
 +
 +      for (i = 0; i < MAX_NR_MAPS; i++)
 +              map_fds[i] = -1;
  
 -      do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
 +      do_test_fixup(test, prog, map_fds);
  
        fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
                                   prog, prog_len, "GPL", 0, bpf_vlog,
        }
  
        (*passes)++;
-       printf("OK\n");
+       printf("OK%s\n", reject_from_alignment ?
+              " (NOTE: reject due to unknown alignment)" : "");
  close_fds:
        close(fd_prog);
 -      close(fd_f1);
 -      close(fd_f2);
 -      close(fd_f3);
 +      for (i = 0; i < MAX_NR_MAPS; i++)
 +              close(map_fds[i]);
        sched_yield();
        return;
  fail_log: